---
apiVersion: v1
kind: Pod
metadata:
  annotations:
    capability.openshift.io/name: baremetal
    include.release.openshift.io/self-managed-high-availability: "true"
    include.release.openshift.io/single-node-developer: "true"
    k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.128.0.49/23"],"mac_address":"0a:58:0a:80:00:31","gateway_ips":["10.128.0.1"],"routes":[{"dest":"10.128.0.0/16","nextHop":"10.128.0.1"},{"dest":"172.30.0.0/16","nextHop":"10.128.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.128.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.128.0.1"}],"ip_address":"10.128.0.49/23","gateway_ip":"10.128.0.1","role":"primary"}}'
    k8s.v1.cni.cncf.io/network-status: |-
      [{
          "name": "ovn-kubernetes",
          "interface": "eth0",
          "ips": [
              "10.128.0.49"
          ],
          "mac": "0a:58:0a:80:00:31",
          "default": true,
          "dns": {}
      }]
    openshift.io/required-scc: anyuid
    openshift.io/scc: anyuid
  creationTimestamp: "2025-12-03T21:50:47Z"
  generateName: cluster-baremetal-operator-5fdc576499-
  labels:
    k8s-app: cluster-baremetal-operator
    pod-template-hash: 5fdc576499
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:capability.openshift.io/name: {}
          f:include.release.openshift.io/self-managed-high-availability: {}
          f:include.release.openshift.io/single-node-developer: {}
          f:openshift.io/required-scc: {}
          f:target.workload.openshift.io/management: {}
        f:generateName: {}
        f:labels:
          .: {}
          f:k8s-app: {}
          f:pod-template-hash: {}
        f:ownerReferences:
          .: {}
          k:{"uid":"45cec892-2e16-424b-bf04-155de1944456"}: {}
      f:spec:
        f:containers:
          k:{"name":"baremetal-kube-rbac-proxy"}:
            .: {}
            f:args: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:ports:
              .: {}
              k:{"containerPort":8443,"protocol":"TCP"}:
                .: {}
                f:containerPort: {}
                f:name: {}
                f:protocol: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/etc/baremetal-kube-rbac-proxy"}:
                .: {}
                f:mountPath: {}
                f:name: {}
              k:{"mountPath":"/etc/tls/private"}:
                .: {}
                f:mountPath: {}
                f:name: {}
          k:{"name":"cluster-baremetal-operator"}:
            .: {}
            f:args: {}
            f:command: {}
            f:env:
              .: {}
              k:{"name":"COMPONENT_NAMESPACE"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
              k:{"name":"METRICS_PORT"}:
                .: {}
                f:name: {}
                f:value: {}
              k:{"name":"RELEASE_VERSION"}:
                .: {}
                f:name: {}
                f:value: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:ports:
              .: {}
              k:{"containerPort":9443,"protocol":"TCP"}:
                .: {}
                f:containerPort: {}
                f:name: {}
                f:protocol: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/etc/cluster-baremetal-operator/images"}:
                .: {}
                f:mountPath: {}
                f:name: {}
                f:readOnly: {}
              k:{"mountPath":"/etc/cluster-baremetal-operator/tls"}:
                .: {}
                f:mountPath: {}
                f:name: {}
                f:readOnly: {}
        f:dnsPolicy: {}
        f:enableServiceLinks: {}
        f:nodeSelector: {}
        f:priorityClassName: {}
        f:restartPolicy: {}
        f:schedulerName: {}
        f:securityContext:
          .: {}
          f:runAsNonRoot: {}
          f:runAsUser: {}
        f:serviceAccount: {}
        f:serviceAccountName: {}
        f:terminationGracePeriodSeconds: {}
        f:tolerations: {}
        f:volumes:
          .: {}
          k:{"name":"cert"}:
            .: {}
            f:name: {}
            f:secret:
              .: {}
              f:defaultMode: {}
              f:secretName: {}
          k:{"name":"cluster-baremetal-operator-tls"}:
            .: {}
            f:name: {}
            f:secret:
              .: {}
              f:defaultMode: {}
              f:secretName: {}
          k:{"name":"config"}:
            .: {}
            f:configMap:
              .: {}
              f:defaultMode: {}
              f:name: {}
            f:name: {}
          k:{"name":"images"}:
            .: {}
            f:configMap:
              .: {}
              f:defaultMode: {}
              f:name: {}
            f:name: {}
    manager: kube-controller-manager
    operation: Update
    time: "2025-12-03T21:50:47Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:k8s.ovn.org/pod-networks: {}
    manager: master-0
    operation: Update
    subresource: status
    time: "2025-12-03T21:50:47Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:k8s.v1.cni.cncf.io/network-status: {}
    manager: multus-daemon
    operation: Update
    subresource: status
    time: "2025-12-03T21:50:49Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          k:{"type":"ContainersReady"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Initialized"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"PodReadyToStartContainers"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Ready"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
        f:containerStatuses: {}
        f:hostIP: {}
        f:hostIPs: {}
        f:phase: {}
        f:podIP: {}
        f:podIPs:
          .: {}
          k:{"ip":"10.128.0.49"}:
            .: {}
            f:ip: {}
        f:startTime: {}
    manager: kubelet
    operation: Update
    subresource: status
    time: "2025-12-03T22:05:26Z"
  name: cluster-baremetal-operator-5fdc576499-q9tf6
  namespace: openshift-machine-api
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: cluster-baremetal-operator-5fdc576499
    uid: 45cec892-2e16-424b-bf04-155de1944456
  resourceVersion: "12393"
  uid: fa9b5917-d4f3-4372-a200-45b57412f92f
spec:
  containers:
  - args:
    - --enable-leader-election
    command:
    - /usr/bin/cluster-baremetal-operator
    env:
    - name: RELEASE_VERSION
      value: 4.18.28
    - name: COMPONENT_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: METRICS_PORT
      value: "8080"
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e
    imagePullPolicy: IfNotPresent
    name: cluster-baremetal-operator
    ports:
    - containerPort: 9443
      name: webhook-server
      protocol: TCP
    resources:
      requests:
        cpu: 10m
        memory: 50Mi
    securityContext:
      capabilities:
        drop:
        - MKNOD
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/cluster-baremetal-operator/tls
      name: cert
      readOnly: true
    - mountPath: /etc/cluster-baremetal-operator/images
      name: images
      readOnly: true
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-pj79k
      readOnly: true
  - args:
    - --secure-listen-address=0.0.0.0:8443
    - --upstream=http://localhost:8080/
    - --tls-cert-file=/etc/tls/private/tls.crt
    - --tls-private-key-file=/etc/tls/private/tls.key
    - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
    - --config-file=/etc/baremetal-kube-rbac-proxy/config-file.yaml
    - --logtostderr=true
    - --v=10
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
    imagePullPolicy: IfNotPresent
    name: baremetal-kube-rbac-proxy
    ports:
    - containerPort: 8443
      name: https
      protocol: TCP
    resources:
      requests:
        cpu: 10m
        memory: 20Mi
    securityContext:
      capabilities:
        drop:
        - MKNOD
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/baremetal-kube-rbac-proxy
      name: config
    - mountPath: /etc/tls/private
      name: cluster-baremetal-operator-tls
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-pj79k
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  imagePullSecrets:
  - name: cluster-baremetal-operator-dockercfg-zj6wp
  nodeName: master-0
  nodeSelector:
    node-role.kubernetes.io/master: ""
  preemptionPolicy: PreemptLowerPriority
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    runAsNonRoot: true
    runAsUser: 65534
    seLinuxOptions:
      level: s0:c22,c14
  serviceAccount: cluster-baremetal-operator
  serviceAccountName: cluster-baremetal-operator
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 120
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 120
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  volumes:
  - name: cert
    secret:
      defaultMode: 420
      secretName: cluster-baremetal-webhook-server-cert
  - configMap:
      defaultMode: 420
      name: baremetal-kube-rbac-proxy
    name: config
  - name: cluster-baremetal-operator-tls
    secret:
      defaultMode: 420
      secretName: cluster-baremetal-operator-tls
  - configMap:
      defaultMode: 420
      name: cluster-baremetal-operator-images
    name: images
  - name: kube-api-access-pj79k
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
      - configMap:
          items:
          - key: service-ca.crt
            path: service-ca.crt
          name: openshift-service-ca.crt
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T21:51:26Z"
    status: "True"
    type: PodReadyToStartContainers
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T21:50:47Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T22:04:06Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T22:04:06Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2025-12-03T21:50:47Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: cri-o://2bde1579de05ada97e585d0ce6d0d39ebdf48e9c221db2db2566818b9002dffa
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
    lastState: {}
    name: baremetal-kube-rbac-proxy
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-12-03T21:51:25Z"
    volumeMounts:
    - mountPath: /etc/baremetal-kube-rbac-proxy
      name: config
    - mountPath: /etc/tls/private
      name: cluster-baremetal-operator-tls
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-pj79k
      readOnly: true
      recursiveReadOnly: Disabled
  - containerID: cri-o://0d3b6aa6ddc448fe01323d383917b0efb673a621dc7f776ca02a466310d7eceb
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e
    lastState:
      terminated:
        containerID: cri-o://2c8f1554a8a69b7abb2ac2fb40ac35726b3a427f042bc6ab03c716500f2824ff
        exitCode: 1
        finishedAt: "2025-12-03T22:03:14Z"
        message: |
          "="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1.DaemonSet"
          I1203 21:55:40.345286 1 controller.go:173] "msg"="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1.ClusterOperator"
          I1203 21:55:40.345316 1 controller.go:173] "msg"="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1.Proxy"
          I1203 21:55:40.345364 1 controller.go:173] "msg"="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1beta1.Machine"
          I1203 21:55:40.345394 1 controller.go:181] "msg"="Starting Controller" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning"
          I1203 21:55:40.569888 1 controller.go:215] "msg"="Starting workers" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "worker count"=1
          E1203 22:01:27.073555 1 leaderelection.go:340] Failed to update lock optimitically: Timeout: request did not complete within requested timeout - context deadline exceeded, falling back to slow path
          E1203 22:02:27.077056 1 leaderelection.go:347] error retrieving resource lock openshift-machine-api/cluster-baremetal-operator: the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io cluster-baremetal-operator)
          I1203 22:02:40.068040 1 leaderelection.go:285] failed to renew lease openshift-machine-api/cluster-baremetal-operator: timed out waiting for the condition
          E1203 22:03:14.075276 1 leaderelection.go:308] Failed to release lock: Timeout: request did not complete within requested timeout - context deadline exceeded
          E1203 22:03:14.075411 1 main.go:182] "problem running manager" err="leader election lost"
        reason: Error
        startedAt: "2025-12-03T21:55:40Z"
    name: cluster-baremetal-operator
    ready: true
    restartCount: 4
    started: true
    state:
      running:
        startedAt: "2025-12-03T22:04:06Z"
    volumeMounts:
    - mountPath: /etc/cluster-baremetal-operator/tls
      name: cert
      readOnly: true
      recursiveReadOnly: Disabled
    - mountPath: /etc/cluster-baremetal-operator/images
      name: images
      readOnly: true
      recursiveReadOnly: Disabled
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-pj79k
      readOnly: true
      recursiveReadOnly: Disabled
  hostIP: 192.168.32.10
  hostIPs:
  - ip: 192.168.32.10
  phase: Running
  podIP: 10.128.0.49
  podIPs:
  - ip: 10.128.0.49
  qosClass: Burstable
  startTime: "2025-12-03T21:50:47Z"