Name:                 cluster-autoscaler-operator-5f49d774cd-894dk
Namespace:            openshift-machine-api
Priority:             2000001000
Priority Class Name:  system-node-critical
Service Account:      cluster-autoscaler-operator
Node:                 master-0/192.168.32.10
Start Time:           Thu, 04 Dec 2025 11:38:43 +0000
Labels:               k8s-app=cluster-autoscaler-operator
                      pod-template-hash=5f49d774cd
Annotations:          k8s.ovn.org/pod-networks:
                        {"default":{"ip_addresses":["10.128.0.59/23"],"mac_address":"0a:58:0a:80:00:3b","gateway_ips":["10.128.0.1"],"routes":[{"dest":"10.128.0.0...
                      k8s.v1.cni.cncf.io/network-status:
                        [{
                            "name": "ovn-kubernetes",
                            "interface": "eth0",
                            "ips": [
                                "10.128.0.59"
                            ],
                            "mac": "0a:58:0a:80:00:3b",
                            "default": true,
                            "dns": {}
                        }]
                      kubectl.kubernetes.io/default-container: cluster-autoscaler-operator
                      openshift.io/required-scc: restricted-v2
                      openshift.io/scc: restricted-v2
                      seccomp.security.alpha.kubernetes.io/pod: runtime/default
Status:               Running
SeccompProfile:       RuntimeDefault
IP:                   10.128.0.59
IPs:
  IP:           10.128.0.59
Controlled By:  ReplicaSet/cluster-autoscaler-operator-5f49d774cd
Containers:
  kube-rbac-proxy:
    Container ID:  cri-o://8351b3c33e5381d9f8f52c5cbb85fe67b5c8d17b81f220227d907d879cdef1ea
    Image:         quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69ffd8f8dcceedc2d6eb306cea33f8beabc1be1308cd5f4ee8b9a8e3eab9843
    Image ID:      quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69ffd8f8dcceedc2d6eb306cea33f8beabc1be1308cd5f4ee8b9a8e3eab9843
    Port:          9192/TCP
    Host Port:     0/TCP
    Args:
      --secure-listen-address=0.0.0.0:9192
      --upstream=http://127.0.0.1:9191/
      --tls-cert-file=/etc/tls/private/tls.crt
      --tls-private-key-file=/etc/tls/private/tls.key
      --config-file=/etc/kube-rbac-proxy/config-file.yaml
      --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
      --logtostderr=true
      --v=3
    State:          Running
      Started:      Thu, 04 Dec 2025 11:38:44 +0000
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /etc/kube-rbac-proxy from auth-proxy-config (ro)
      /etc/tls/private from cert (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-2pqsl (ro)
  cluster-autoscaler-operator:
    Container ID:  cri-o://29db6007fa122142272c2e747034efd7c5978859754eee3760f27dbc521cf520
    Image:         quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:72bbe2c638872937108f647950ab8ad35c0428ca8ecc6a39a8314aace7d95078
    Image ID:      quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:72bbe2c638872937108f647950ab8ad35c0428ca8ecc6a39a8314aace7d95078
    Port:          8443/TCP
    Host Port:     0/TCP
    Command:
      cluster-autoscaler-operator
    Args:
      -alsologtostderr
    State:       Running
      Started:   Thu, 04 Dec 2025 11:56:04 +0000
    Last State:  Terminated
      Reason:    Error
      Message:    in the time allotted, but may still be processing the request (get leases.coordination.k8s.io cluster-autoscaler-operator-leader)
E1204 11:53:04.405651       1 status.go:311] status reporting failed: the server was unable to return a response in the time allotted, but may still be processing the request (get clusteroperators.config.openshift.io cluster-autoscaler)
E1204 11:54:04.407610       1 status.go:426] failed to get dependency machine-api status: the server was unable to return a response in the time allotted, but may still be processing the request (get clusteroperators.config.openshift.io machine-api)
W1204 11:54:04.407685       1 status.go:271] Operator status degraded: error checking machine-api status: the server was unable to return a response in the time allotted, but may still be processing the request (get clusteroperators.config.openshift.io machine-api)
E1204 11:54:12.578608       1 leaderelection.go:429] Failed to update lock optimistically: Timeout: request did not complete within requested timeout - context deadline exceeded, falling back to slow path
E1204 11:55:04.409885       1 status.go:311] status reporting failed: the server was unable to return a response in the time allotted, but may still be processing the request (get clusteroperators.config.openshift.io cluster-autoscaler)
E1204 11:55:04.563080       1 leaderelection.go:436] error retrieving resource lock openshift-machine-api/cluster-autoscaler-operator-leader: Get "https://172.30.0.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-api/leases/cluster-autoscaler-operator-leader": context deadline exceeded
I1204 11:55:04.563166       1 leaderelection.go:297] failed to renew lease openshift-machine-api/cluster-autoscaler-operator-leader: timed out waiting for the condition
E1204 11:55:38.566293       1 leaderelection.go:322] Failed to release lock: Timeout: request did not complete within requested timeout - context deadline exceeded
F1204 11:55:38.566379       1 main.go:43] Failed to start operator: leader election lost

      Exit Code:    255
      Started:      Thu, 04 Dec 2025 11:46:04 +0000
      Finished:     Thu, 04 Dec 2025 11:55:38 +0000
    Ready:          True
    Restart Count:  3
    Requests:
      cpu:     20m
      memory:  50Mi
    Environment:
      RELEASE_VERSION:               4.18.29
      WATCH_NAMESPACE:               openshift-machine-api (v1:metadata.namespace)
      CLUSTER_AUTOSCALER_NAMESPACE:  openshift-machine-api (v1:metadata.namespace)
      LEADER_ELECTION_NAMESPACE:     openshift-machine-api (v1:metadata.namespace)
      CLUSTER_AUTOSCALER_IMAGE:      quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6127d09ff17b35440f5116598731952167eab9b30ad2218efd25fbb3fd0d7586
      WEBHOOKS_CERT_DIR:             /etc/cluster-autoscaler-operator/tls
      WEBHOOKS_PORT:                 8443
      METRICS_PORT:                  9191
    Mounts:
      /etc/cluster-autoscaler-operator/tls from cert (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-2pqsl (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True 
  Initialized                 True 
  Ready                       True 
  ContainersReady             True 
  PodScheduled                True 
Volumes:
  cert:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  cluster-autoscaler-operator-cert
    Optional:    false
  auth-proxy-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      kube-rbac-proxy-cluster-autoscaler-operator
    Optional:  false
  kube-api-access-2pqsl:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
    ConfigMapName:           openshift-service-ca.crt
    ConfigMapOptional:       <nil>
QoS Class:                   Burstable
Node-Selectors:              node-role.kubernetes.io/master=
Tolerations:                 node-role.kubernetes.io/master:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason          Age                From               Message
  ----     ------          ----               ----               -------
  Normal   Scheduled       70m                default-scheduler  Successfully assigned openshift-machine-api/cluster-autoscaler-operator-5f49d774cd-894dk to master-0
  Normal   AddedInterface  70m                multus             Add eth0 [10.128.0.59/23] from ovn-kubernetes
  Normal   Pulled          70m                kubelet            Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69ffd8f8dcceedc2d6eb306cea33f8beabc1be1308cd5f4ee8b9a8e3eab9843" already present on machine
  Normal   Created         70m                kubelet            Created container: kube-rbac-proxy
  Normal   Started         70m                kubelet            Started container kube-rbac-proxy
  Normal   Pulling         70m                kubelet            Pulling image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:72bbe2c638872937108f647950ab8ad35c0428ca8ecc6a39a8314aace7d95078"
  Normal   Pulled          70m                kubelet            Successfully pulled image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:72bbe2c638872937108f647950ab8ad35c0428ca8ecc6a39a8314aace7d95078" in 36.853s (36.853s including waiting). Image size: 450841337 bytes.
  Warning  BackOff         53m (x3 over 63m)  kubelet            Back-off restarting failed container cluster-autoscaler-operator in pod cluster-autoscaler-operator-5f49d774cd-894dk_openshift-machine-api(e7fc7c16-5bca-49e5-aff0-7a8f80c6b639)
  Normal   Created         53m (x4 over 70m)  kubelet            Created container: cluster-autoscaler-operator
  Normal   Started         53m (x4 over 70m)  kubelet            Started container cluster-autoscaler-operator
  Normal   Pulled          53m (x3 over 64m)  kubelet            Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:72bbe2c638872937108f647950ab8ad35c0428ca8ecc6a39a8314aace7d95078" already present on machine
  Warning  FailedMount     50m                kubelet            MountVolume.SetUp failed for volume "auth-proxy-config" : failed to sync configmap cache: timed out waiting for the condition
  Warning  FailedMount     50m                kubelet            MountVolume.SetUp failed for volume "cert" : failed to sync secret cache: timed out waiting for the condition
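
Reading the output above: the cluster-autoscaler-operator container crash-looped (Restart Count: 3, BackOff events) because requests to the API server at 172.30.0.1:443 timed out while it was renewing its leader-election lease; once renewal failed, the operator exited fatally at main.go:43 ("leader election lost") with exit code 255, and the kubelet restarted it. The pod is now Running and Ready. A minimal follow-up sketch to confirm recovery, assuming `oc` access to this cluster; every resource and pod name below is taken from the output above, nothing is invented:

  # Check the lease is being renewed again (holder + renewTime should be current)
  $ oc -n openshift-machine-api get lease cluster-autoscaler-operator-leader -o yaml

  # Both ClusterOperators named in the error logs should no longer report Degraded
  $ oc get clusteroperators cluster-autoscaler machine-api

  # Retrieve the full log of the terminated run (describe only shows its tail)
  $ oc -n openshift-machine-api logs cluster-autoscaler-operator-5f49d774cd-894dk \
      -c cluster-autoscaler-operator --previous

If the lease's renewTime keeps advancing and neither ClusterOperator reports Degraded, the restarts were transient API-server pressure; if the timeouts recur, the next place to look is control-plane health (etcd and kube-apiserver) rather than this operator.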
