---
# Mirror-pod manifest for the kube-apiserver static pod on node master-0
# (namespace openshift-kube-apiserver, static-pod revision "6").
# NOTE(review): the stored copy had every newline/indent collapsed to a single
# space and was not parseable YAML; this is a re-indented reconstruction of the
# same data. Line breaks inside the two bash block scalars were re-derived from
# shell syntax — verify against the original static-pod manifest.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubectl.kubernetes.io/default-container: kube-apiserver
    kubernetes.io/config.hash: dcee84e50e891b3b96d641a4f9f6a202
    kubernetes.io/config.mirror: dcee84e50e891b3b96d641a4f9f6a202
    kubernetes.io/config.seen: "2025-12-03T14:27:32.344055385Z"
    kubernetes.io/config.source: file
    target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
  creationTimestamp: "2025-12-03T14:27:55Z"
  labels:
    apiserver: "true"
    app: openshift-kube-apiserver
    revision: "6"
  # Server-side-apply field-ownership bookkeeping written by the kubelet;
  # records which spec/status fields the kubelet manages on this mirror pod.
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/default-container: {}
          f:kubernetes.io/config.hash: {}
          f:kubernetes.io/config.mirror: {}
          f:kubernetes.io/config.seen: {}
          f:kubernetes.io/config.source: {}
          f:target.workload.openshift.io/management: {}
        f:labels:
          .: {}
          f:apiserver: {}
          f:app: {}
          f:revision: {}
        f:ownerReferences:
          .: {}
          k:{"uid":"bd7abe6d-36d2-4ce8-92a1-7fdd9966d574"}: {}
      f:spec:
        f:containers:
          k:{"name":"kube-apiserver"}:
            .: {}
            f:args: {}
            f:command: {}
            f:env:
              .: {}
              k:{"name":"GOGC"}:
                .: {}
                f:name: {}
                f:value: {}
              k:{"name":"HOST_IP"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
              k:{"name":"POD_NAME"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
              k:{"name":"POD_NAMESPACE"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
              k:{"name":"STATIC_POD_VERSION"}:
                .: {}
                f:name: {}
                f:value: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:livenessProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:name: {}
            f:ports:
              .: {}
              k:{"containerPort":6443,"protocol":"TCP"}:
                .: {}
                f:containerPort: {}
                f:hostPort: {}
                f:protocol: {}
            f:readinessProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:securityContext:
              .: {}
              f:privileged: {}
            f:startupProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/etc/kubernetes/static-pod-certs"}:
                .: {}
                f:mountPath: {}
                f:name: {}
              k:{"mountPath":"/etc/kubernetes/static-pod-resources"}:
                .: {}
                f:mountPath: {}
                f:name: {}
              k:{"mountPath":"/var/log/kube-apiserver"}:
                .: {}
                f:mountPath: {}
                f:name: {}
          k:{"name":"kube-apiserver-cert-regeneration-controller"}:
            .: {}
            f:args: {}
            f:command: {}
            f:env:
              .: {}
              k:{"name":"POD_NAMESPACE"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/etc/kubernetes/static-pod-resources"}:
                .: {}
                f:mountPath: {}
                f:name: {}
          k:{"name":"kube-apiserver-cert-syncer"}:
            .: {}
            f:args: {}
            f:command: {}
            f:env:
              .: {}
              k:{"name":"POD_NAME"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
              k:{"name":"POD_NAMESPACE"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/etc/kubernetes/static-pod-certs"}:
                .: {}
                f:mountPath: {}
                f:name: {}
              k:{"mountPath":"/etc/kubernetes/static-pod-resources"}:
                .: {}
                f:mountPath: {}
                f:name: {}
          k:{"name":"kube-apiserver-check-endpoints"}:
            .: {}
            f:args: {}
            f:command: {}
            f:env:
              .: {}
              k:{"name":"POD_NAME"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
              k:{"name":"POD_NAMESPACE"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:livenessProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:initialDelaySeconds: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:name: {}
            f:ports:
              .: {}
              k:{"containerPort":17697,"protocol":"TCP"}:
                .: {}
                f:containerPort: {}
                f:hostPort: {}
                f:name: {}
                f:protocol: {}
            f:readinessProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:initialDelaySeconds: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/etc/kubernetes/static-pod-certs"}:
                .: {}
                f:mountPath: {}
                f:name: {}
              k:{"mountPath":"/etc/kubernetes/static-pod-resources"}:
                .: {}
                f:mountPath: {}
                f:name: {}
          k:{"name":"kube-apiserver-insecure-readyz"}:
            .: {}
            f:args: {}
            f:command: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:ports:
              .: {}
              k:{"containerPort":6080,"protocol":"TCP"}:
                .: {}
                f:containerPort: {}
                f:hostPort: {}
                f:protocol: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
        f:dnsPolicy: {}
        f:enableServiceLinks: {}
        f:hostNetwork: {}
        f:initContainers:
          .: {}
          k:{"name":"setup"}:
            .: {}
            f:args: {}
            f:command: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:securityContext:
              .: {}
              f:privileged: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/var/log/kube-apiserver"}:
                .: {}
                f:mountPath: {}
                f:name: {}
        f:nodeName: {}
        f:priorityClassName: {}
        f:restartPolicy: {}
        f:schedulerName: {}
        f:securityContext: {}
        f:terminationGracePeriodSeconds: {}
        f:tolerations: {}
        f:volumes:
          .: {}
          k:{"name":"audit-dir"}:
            .: {}
            f:hostPath:
              .: {}
              f:path: {}
              f:type: {}
            f:name: {}
          k:{"name":"cert-dir"}:
            .: {}
            f:hostPath:
              .: {}
              f:path: {}
              f:type: {}
            f:name: {}
          k:{"name":"resource-dir"}:
            .: {}
            f:hostPath:
              .: {}
              f:path: {}
              f:type: {}
            f:name: {}
    manager: kubelet
    operation: Update
    time: "2025-12-03T14:27:55Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          .: {}
          k:{"type":"ContainersReady"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Initialized"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"PodReadyToStartContainers"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"PodScheduled"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Ready"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
        f:containerStatuses: {}
        f:hostIP: {}
        f:hostIPs: {}
        f:initContainerStatuses: {}
        f:phase: {}
        f:podIP: {}
        f:podIPs:
          .: {}
          k:{"ip":"192.168.32.10"}:
            .: {}
            f:ip: {}
        f:startTime: {}
    manager: kubelet
    operation: Update
    subresource: status
    time: "2025-12-03T14:28:10Z"
  name: kube-apiserver-master-0
  namespace: openshift-kube-apiserver
  ownerReferences:
  - apiVersion: v1
    controller: true
    kind: Node
    name: master-0
    uid: bd7abe6d-36d2-4ce8-92a1-7fdd9966d574
  resourceVersion: "23095"
  uid: 3f81ebc5-3e17-4971-b0b1-45a337a2142d
spec:
  containers:
  # Main apiserver container. The bash wrapper serializes startup against the
  # "setup" init container via a flock on /var/log/kube-apiserver/.lock, then
  # execs hyperkube under watch-termination.
  - args:
    - |
      LOCK=/var/log/kube-apiserver/.lock
      # We should be able to acquire the lock immediatelly. If not, it means the init container has not released it yet and kubelet or CRI-O started container prematurely.
      exec {LOCK_FD}>${LOCK} && flock --verbose -w 30 "${LOCK_FD}" || {
        echo "Failed to acquire lock for kube-apiserver. Please check setup container for details. This is likely kubelet or CRI-O bug."
        exit 1
      }
      if [ -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt ]; then
        echo "Copying system trust bundle ..."
        cp -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
      fi
      exec watch-termination --termination-touch-file=/var/log/kube-apiserver/.terminating --termination-log-file=/var/log/kube-apiserver/termination.log --graceful-termination-duration=15s --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig -- hyperkube kube-apiserver --openshift-config=/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml --advertise-address=${HOST_IP} -v=2 --permit-address-sharing
    command:
    - /bin/bash
    - -ec
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.name
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: STATIC_POD_VERSION
      value: "6"
    - name: HOST_IP
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: status.hostIP
    - name: GOGC
      value: "100"
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0bb91faa6e9f82b589a6535665e51517abe4a1b2eb5d0b3a36b36df6a5330a0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: livez?exclude=etcd
        port: 6443
        scheme: HTTPS
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 10
    name: kube-apiserver
    ports:
    - containerPort: 6443
      hostPort: 6443
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: readyz
        port: 6443
        scheme: HTTPS
      periodSeconds: 5
      successThreshold: 1
      timeoutSeconds: 10
    resources:
      requests:
        cpu: 265m
        memory: 1Gi
    securityContext:
      privileged: true
    startupProbe:
      failureThreshold: 30
      httpGet:
        path: livez
        port: 6443
        scheme: HTTPS
      periodSeconds: 5
      successThreshold: 1
      timeoutSeconds: 10
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/kubernetes/static-pod-resources
      name: resource-dir
    - mountPath: /etc/kubernetes/static-pod-certs
      name: cert-dir
    - mountPath: /var/log/kube-apiserver
      name: audit-dir
  # Sidecar: copies serving certs/configmaps into the static-pod-certs dir.
  - args:
    - --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig
    - --namespace=$(POD_NAMESPACE)
    - --destination-dir=/etc/kubernetes/static-pod-certs
    command:
    - cluster-kube-apiserver-operator
    - cert-syncer
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.name
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imagePullPolicy: IfNotPresent
    name: kube-apiserver-cert-syncer
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/kubernetes/static-pod-resources
      name: resource-dir
    - mountPath: /etc/kubernetes/static-pod-certs
      name: cert-dir
  # Sidecar: cert-regeneration controller run by the operator binary.
  - args:
    - --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig
    - --namespace=$(POD_NAMESPACE)
    - -v=2
    command:
    - cluster-kube-apiserver-operator
    - cert-regeneration-controller
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imagePullPolicy: IfNotPresent
    name: kube-apiserver-cert-regeneration-controller
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/kubernetes/static-pod-resources
      name: resource-dir
  # Sidecar: plain-HTTP readiness proxy on :6080 delegating to the TLS /readyz.
  - args:
    - --insecure-port=6080
    - --delegate-url=https://localhost:6443/readyz
    command:
    - cluster-kube-apiserver-operator
    - insecure-readyz
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imagePullPolicy: IfNotPresent
    name: kube-apiserver-insecure-readyz
    ports:
    - containerPort: 6080
      hostPort: 6080
      protocol: TCP
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
  # Sidecar: connectivity checker listening on :17697.
  - args:
    - --kubeconfig
    - /etc/kubernetes/static-pod-certs/configmaps/check-endpoints-kubeconfig/kubeconfig
    - --listen
    - 0.0.0.0:17697
    - --namespace
    - $(POD_NAMESPACE)
    - --v
    - "2"
    command:
    - cluster-kube-apiserver-operator
    - check-endpoints
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.name
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: healthz
        port: 17697
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 10
    name: kube-apiserver-check-endpoints
    ports:
    - containerPort: 17697
      hostPort: 17697
      name: check-endpoints
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: healthz
        port: 17697
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 10
    resources:
      requests:
        cpu: 10m
        memory: 50Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/kubernetes/static-pod-resources
      name: resource-dir
    - mountPath: /etc/kubernetes/static-pod-certs
      name: cert-dir
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostNetwork: true
  initContainers:
  # Fixes audit-log permissions and takes the startup lock so an old
  # kube-apiserver (SO_REUSEPORT on :6443/:6080) has drained before the new
  # one binds. Wrapped in `timeout 100` so a stuck lock cannot block forever.
  - args:
    - |
      echo "Fixing audit permissions ..."
      chmod 0700 /var/log/kube-apiserver && touch /var/log/kube-apiserver/audit.log && chmod 0600 /var/log/kube-apiserver/*
      LOCK=/var/log/kube-apiserver/.lock
      echo "Acquiring exclusive lock ${LOCK} ..."

      # Waiting for 15s max for old kube-apiserver's watch-termination process to exit and remove the lock.
      # Two cases:
      # 1. if kubelet does not start the old and new in parallel (i.e. works as expected), the flock will always succeed without any time.
      # 2. if kubelet does overlap old and new pods for up to 130s, the flock will wait and immediate return when the old finishes.
      #
      # NOTE: We can increase 15s for a bigger expected overlap. But a higher value means less noise about the broken kubelet behaviour, i.e. we hide a bug.
      # NOTE: Do not tweak these timings without considering the livenessProbe initialDelaySeconds
      exec {LOCK_FD}>${LOCK} && flock --verbose -w 15 "${LOCK_FD}" || {
        echo "$(date -Iseconds -u) kubelet did not terminate old kube-apiserver before new one" >> /var/log/kube-apiserver/lock.log
        echo -n ": WARNING: kubelet did not terminate old kube-apiserver before new one."

        # We failed to acquire exclusive lock, which means there is old kube-apiserver running in system.
        # Since we utilize SO_REUSEPORT, we need to make sure the old kube-apiserver stopped listening.
        #
        # NOTE: This is a fallback for broken kubelet, if you observe this please report a bug.
        echo -n "Waiting for port 6443 to be released due to likely bug in kubelet or CRI-O "
        while [ -n "$(ss -Htan state listening '( sport = 6443 or sport = 6080 )')" ]; do
          echo -n "."
          sleep 1
          (( tries += 1 ))
          if [[ "${tries}" -gt 10 ]]; then
            echo "Timed out waiting for port :6443 and :6080 to be released, this is likely a bug in kubelet or CRI-O"
            exit 1
          fi
        done

        # This is to make sure the server has terminated independently from the lock.
        # After the port has been freed (requests can be pending and need 60s max).
        sleep 65
      }

      # We cannot hold the lock from the init container to the main container. We release it here. There is no risk, at this point we know we are safe.
      flock -u "${LOCK_FD}"
    command:
    - /usr/bin/timeout
    - "100"
    - /bin/bash
    - -ec
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0bb91faa6e9f82b589a6535665e51517abe4a1b2eb5d0b3a36b36df6a5330a0
    imagePullPolicy: IfNotPresent
    name: setup
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /var/log/kube-apiserver
      name: audit-dir
  nodeName: master-0
  preemptionPolicy: PreemptLowerPriority
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  terminationGracePeriodSeconds: 15
  tolerations:
  - operator: Exists
  volumes:
  - hostPath:
      path: /etc/kubernetes/static-pod-resources/kube-apiserver-pod-6
      type: ""
    name: resource-dir
  - hostPath:
      path: /etc/kubernetes/static-pod-resources/kube-apiserver-certs
      type: ""
    name: cert-dir
  - hostPath:
      path: /var/log/kube-apiserver
      type: ""
    name: audit-dir
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T14:28:10Z"
    status: "True"
    type: PodReadyToStartContainers
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T14:28:10Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T14:28:10Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T14:28:10Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T14:28:10Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: cri-o://660a7e8daecb8d48aa3ac0501849a1c55e3cdab2e786bfa047f476cdf31f48fc
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0bb91faa6e9f82b589a6535665e51517abe4a1b2eb5d0b3a36b36df6a5330a0
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0bb91faa6e9f82b589a6535665e51517abe4a1b2eb5d0b3a36b36df6a5330a0
    lastState: {}
    name: kube-apiserver
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-12-03T14:27:47Z"
  - containerID: cri-o://c1b5f6885c7612a47f89ecf1b5eb852658dbb0394167d4a6e79935e70ec8fd4a
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    lastState: {}
    name: kube-apiserver-cert-regeneration-controller
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-12-03T14:27:48Z"
  - containerID: cri-o://d8c73f9c9117e2ed99284f7ab1fb4e93c432de4c33f0c44b76cc98c819675316
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    lastState: {}
    name: kube-apiserver-cert-syncer
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-12-03T14:27:48Z"
  - containerID: cri-o://46cc10d677e66238de8738cb9a021fc9ce8769bfa5baf7d8bd981e4dd2767b30
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    lastState: {}
    name: kube-apiserver-check-endpoints
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-12-03T14:27:48Z"
  - containerID: cri-o://2d680d362977f571f20e32ea632cfa124a8efba71100f4088530d74d5e7dd98e
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91cbda9693e888881e7c45cd6e504b91ba8a203fe0596237a4a17b3ca4e18eef
    lastState: {}
    name: kube-apiserver-insecure-readyz
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-12-03T14:27:48Z"
  hostIP: 192.168.32.10
  hostIPs:
  - ip: 192.168.32.10
  initContainerStatuses:
  - containerID: cri-o://e463c29944fb971f88905896f2e8cfcccb9045d01be5d23cf2cc9038a8423e85
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0bb91faa6e9f82b589a6535665e51517abe4a1b2eb5d0b3a36b36df6a5330a0
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0bb91faa6e9f82b589a6535665e51517abe4a1b2eb5d0b3a36b36df6a5330a0
    lastState: {}
    name: setup
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: cri-o://e463c29944fb971f88905896f2e8cfcccb9045d01be5d23cf2cc9038a8423e85
        exitCode: 0
        finishedAt: "2025-12-03T14:27:47Z"
        reason: Completed
        startedAt: "2025-12-03T14:27:47Z"
  phase: Running
  podIP: 192.168.32.10
  podIPs:
  - ip: 192.168.32.10
  qosClass: Burstable
  startTime: "2025-12-03T14:28:10Z"