Name:                 cluster-baremetal-operator-5fdc576499-q9tf6
Namespace:            openshift-machine-api
Priority:             2000001000
Priority Class Name:  system-node-critical
Service Account:      cluster-baremetal-operator
Node:                 master-0/192.168.32.10
Start Time:           Wed, 03 Dec 2025 21:50:47 +0000
Labels:               k8s-app=cluster-baremetal-operator
                      pod-template-hash=5fdc576499
Annotations:          capability.openshift.io/name: baremetal
                      include.release.openshift.io/self-managed-high-availability: true
                      include.release.openshift.io/single-node-developer: true
                      k8s.ovn.org/pod-networks:
                        {"default":{"ip_addresses":["10.128.0.49/23"],"mac_address":"0a:58:0a:80:00:31","gateway_ips":["10.128.0.1"],"routes":[{"dest":"10.128.0.0...
                      k8s.v1.cni.cncf.io/network-status:
                        [{
                            "name": "ovn-kubernetes",
                            "interface": "eth0",
                            "ips": [
                                "10.128.0.49"
                            ],
                            "mac": "0a:58:0a:80:00:31",
                            "default": true,
                            "dns": {}
                        }]
                      openshift.io/required-scc: anyuid
                      openshift.io/scc: anyuid
Status:               Running
IP:                   10.128.0.49
IPs:
  IP:           10.128.0.49
Controlled By:  ReplicaSet/cluster-baremetal-operator-5fdc576499
Containers:
  cluster-baremetal-operator:
    Container ID:  cri-o://0d3b6aa6ddc448fe01323d383917b0efb673a621dc7f776ca02a466310d7eceb
    Image:         quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e
    Image ID:      quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e
    Port:          9443/TCP
    Host Port:     0/TCP
    Command:
      /usr/bin/cluster-baremetal-operator
    Args:
      --enable-leader-election
    State:       Running
      Started:   Wed, 03 Dec 2025 22:04:06 +0000
    Last State:  Terminated
      Reason:    Error
      Message:   "="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1.DaemonSet"
I1203 21:55:40.345286       1 controller.go:173] "msg"="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1.ClusterOperator"
I1203 21:55:40.345316       1 controller.go:173] "msg"="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1.Proxy"
I1203 21:55:40.345364       1 controller.go:173] "msg"="Starting EventSource" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "source"="kind source: *v1beta1.Machine"
I1203 21:55:40.345394       1 controller.go:181] "msg"="Starting Controller" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning"
I1203 21:55:40.569888       1 controller.go:215] "msg"="Starting workers" "controller"="provisioning" "controllerGroup"="metal3.io" "controllerKind"="Provisioning" "worker count"=1
E1203 22:01:27.073555       1 leaderelection.go:340] Failed to update lock optimistically: Timeout: request did not complete within requested timeout - context deadline exceeded, falling back to slow path
E1203 22:02:27.077056       1 leaderelection.go:347] error retrieving resource lock openshift-machine-api/cluster-baremetal-operator: the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io cluster-baremetal-operator)
I1203 22:02:40.068040       1 leaderelection.go:285] failed to renew lease openshift-machine-api/cluster-baremetal-operator: timed out waiting for the condition
E1203 22:03:14.075276       1 leaderelection.go:308] Failed to release lock: Timeout: request did not complete within requested timeout - context deadline exceeded
E1203 22:03:14.075411       1 main.go:182] "problem running manager" err="leader election lost"

      Exit Code:    1
      Started:      Wed, 03 Dec 2025 21:55:40 +0000
      Finished:     Wed, 03 Dec 2025 22:03:14 +0000
    Ready:          True
    Restart Count:  4
    Requests:
      cpu:     10m
      memory:  50Mi
    Environment:
      RELEASE_VERSION:      4.18.28
      COMPONENT_NAMESPACE:  openshift-machine-api (v1:metadata.namespace)
      METRICS_PORT:         8080
    Mounts:
      /etc/cluster-baremetal-operator/images from images (ro)
      /etc/cluster-baremetal-operator/tls from cert (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pj79k (ro)
  baremetal-kube-rbac-proxy:
    Container ID:  cri-o://2bde1579de05ada97e585d0ce6d0d39ebdf48e9c221db2db2566818b9002dffa
    Image:         quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
    Image ID:      quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
    Port:          8443/TCP
    Host Port:     0/TCP
    Args:
      --secure-listen-address=0.0.0.0:8443
      --upstream=http://localhost:8080/
      --tls-cert-file=/etc/tls/private/tls.crt
      --tls-private-key-file=/etc/tls/private/tls.key
      --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
      --config-file=/etc/baremetal-kube-rbac-proxy/config-file.yaml
      --logtostderr=true
      --v=10
    State:          Running
      Started:      Wed, 03 Dec 2025 21:51:25 +0000
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /etc/baremetal-kube-rbac-proxy from config (rw)
      /etc/tls/private from cluster-baremetal-operator-tls (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pj79k (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True 
  Initialized                 True 
  Ready                       True 
  ContainersReady             True 
  PodScheduled                True 
Volumes:
  cert:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  cluster-baremetal-webhook-server-cert
    Optional:    false
  config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      baremetal-kube-rbac-proxy
    Optional:  false
  cluster-baremetal-operator-tls:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  cluster-baremetal-operator-tls
    Optional:    false
  images:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      cluster-baremetal-operator-images
    Optional:  false
  kube-api-access-pj79k:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
    ConfigMapName:           openshift-service-ca.crt
    ConfigMapOptional:       <nil>
QoS Class:                   Burstable
Node-Selectors:              node-role.kubernetes.io/master=
Tolerations:                 node-role.kubernetes.io/master:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 120s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 120s
Events:
  Type     Reason          Age                  From               Message
  ----     ------          ----                 ----               -------
  Normal   Scheduled       114m                 default-scheduler  Successfully assigned openshift-machine-api/cluster-baremetal-operator-5fdc576499-q9tf6 to master-0
  Normal   AddedInterface  114m                 multus             Add eth0 [10.128.0.49/23] from ovn-kubernetes
  Normal   Pulling         114m                 kubelet            Pulling image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e"
  Normal   Pulled          113m                 kubelet            Successfully pulled image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e" in 22.922s (22.922s including waiting). Image size: 465302163 bytes.
  Normal   Pulled          113m                 kubelet            Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d" already present on machine
  Normal   Created         113m                 kubelet            Created container: baremetal-kube-rbac-proxy
  Normal   Started         113m                 kubelet            Started container baremetal-kube-rbac-proxy
  Warning  BackOff         101m (x6 over 109m)  kubelet            Back-off restarting failed container cluster-baremetal-operator in pod cluster-baremetal-operator-5fdc576499-q9tf6_openshift-machine-api(fa9b5917-d4f3-4372-a200-45b57412f92f)
  Normal   Pulled          101m (x4 over 112m)  kubelet            Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b294511902fd7a80e135b23895a944570932dc0fab1ee22f296523840740332e" already present on machine
  Normal   Started         101m (x5 over 113m)  kubelet            Started container cluster-baremetal-operator
  Normal   Created         101m (x5 over 113m)  kubelet            Created container: cluster-baremetal-operator
  Warning  FailedMount     94m                  kubelet            MountVolume.SetUp failed for volume "cluster-baremetal-operator-tls" : failed to sync secret cache: timed out waiting for the condition
  Warning  FailedMount     94m                  kubelet            MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition
  Warning  FailedMount     94m                  kubelet            MountVolume.SetUp failed for volume "cert" : failed to sync secret cache: timed out waiting for the condition
  Warning  FailedMount     94m                  kubelet            MountVolume.SetUp failed for volume "images" : failed to sync configmap cache: timed out waiting for the condition
