---
apiVersion: v1
items:
- apiVersion: v1
  kind: Pod
  metadata:
    creationTimestamp: "2025-12-04T00:35:01Z"
    generateName: machine-approver-cb84b9cdf-
    labels:
      app: machine-approver
      pod-template-hash: cb84b9cdf
    managedFields:
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            .: {}
            f:target.workload.openshift.io/management: {}
          f:generateName: {}
          f:labels:
            .: {}
            f:app: {}
            f:pod-template-hash: {}
          f:ownerReferences:
            .: {}
            k:{"uid":"4bd7a541-0e92-473d-8e15-9ce06a0f98d8"}: {}
        f:spec:
          f:containers:
            k:{"name":"kube-rbac-proxy"}:
              .: {}
              f:args: {}
              f:image: {}
              f:imagePullPolicy: {}
              f:name: {}
              f:ports:
                .: {}
                k:{"containerPort":9192,"protocol":"TCP"}:
                  .: {}
                  f:containerPort: {}
                  f:hostPort: {}
                  f:name: {}
                  f:protocol: {}
              f:resources:
                .: {}
                f:requests:
                  .: {}
                  f:cpu: {}
                  f:memory: {}
              f:terminationMessagePath: {}
              f:terminationMessagePolicy: {}
              f:volumeMounts:
                .: {}
                k:{"mountPath":"/etc/kube-rbac-proxy"}:
                  .: {}
                  f:mountPath: {}
                  f:name: {}
                k:{"mountPath":"/etc/tls/private"}:
                  .: {}
                  f:mountPath: {}
                  f:name: {}
            k:{"name":"machine-approver-controller"}:
              .: {}
              f:args: {}
              f:command: {}
              f:env:
                .: {}
                k:{"name":"RELEASE_VERSION"}:
                  .: {}
                  f:name: {}
                  f:value: {}
              f:image: {}
              f:imagePullPolicy: {}
              f:name: {}
              f:resources:
                .: {}
                f:requests:
                  .: {}
                  f:cpu: {}
                  f:memory: {}
              f:terminationMessagePath: {}
              f:terminationMessagePolicy: {}
              f:volumeMounts:
                .: {}
                k:{"mountPath":"/var/run/configmaps/config"}:
                  .: {}
                  f:mountPath: {}
                  f:name: {}
          f:dnsPolicy: {}
          f:enableServiceLinks: {}
          f:hostNetwork: {}
          f:nodeSelector: {}
          f:priorityClassName: {}
          f:restartPolicy: {}
          f:schedulerName: {}
          f:securityContext: {}
          f:serviceAccount: {}
          f:serviceAccountName: {}
          f:terminationGracePeriodSeconds: {}
          f:tolerations: {}
          f:volumes:
            .: {}
            k:{"name":"auth-proxy-config"}:
              .: {}
              f:configMap:
                .: {}
                f:defaultMode: {}
                f:name: {}
              f:name: {}
            k:{"name":"config"}:
              .: {}
              f:configMap:
                .: {}
                f:defaultMode: {}
                f:name: {}
                f:optional: {}
              f:name: {}
            k:{"name":"machine-approver-tls"}:
              .: {}
              f:name: {}
              f:secret:
                .: {}
                f:defaultMode: {}
                f:secretName: {}
      manager: kube-controller-manager
      operation: Update
      time: "2025-12-04T00:35:01Z"
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:status:
          f:conditions:
            k:{"type":"ContainersReady"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Initialized"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"PodReadyToStartContainers"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Ready"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
          f:containerStatuses: {}
          f:hostIP: {}
          f:hostIPs: {}
          f:phase: {}
          f:podIP: {}
          f:podIPs:
            .: {}
            k:{"ip":"192.168.32.10"}:
              .: {}
              f:ip: {}
          f:startTime: {}
      manager: kubelet
      operation: Update
      subresource: status
      time: "2025-12-04T00:45:00Z"
    name: machine-approver-cb84b9cdf-tsm24
    namespace: openshift-cluster-machine-approver
    ownerReferences:
    - apiVersion: apps/v1
      blockOwnerDeletion: true
      controller: true
      kind: ReplicaSet
      name: machine-approver-cb84b9cdf
      uid: 4bd7a541-0e92-473d-8e15-9ce06a0f98d8
    resourceVersion: "11897"
    uid: 62e34e8d-fd5e-4d7b-81f4-3ad911b3dedb
  spec:
    containers:
    - args:
      - --secure-listen-address=0.0.0.0:9192
      - --upstream=http://127.0.0.1:9191/
      - --tls-cert-file=/etc/tls/private/tls.crt
      - --tls-private-key-file=/etc/tls/private/tls.key
      - --config-file=/etc/kube-rbac-proxy/config-file.yaml
      - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
      - --logtostderr=true
      - --v=3
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
      imagePullPolicy: IfNotPresent
      name: kube-rbac-proxy
      ports:
      - containerPort: 9192
        hostPort: 9192
        name: https
        protocol: TCP
      resources:
        requests:
          cpu: 10m
          memory: 20Mi
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
      - mountPath: /etc/kube-rbac-proxy
        name: auth-proxy-config
      - mountPath: /etc/tls/private
        name: machine-approver-tls
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-9zdwv
        readOnly: true
    - args:
      - --config=/var/run/configmaps/config/config.yaml
      - -v=2
      - --logtostderr
      - --leader-elect=true
      - --leader-elect-lease-duration=137s
      - --leader-elect-renew-deadline=107s
      - --leader-elect-retry-period=26s
      - --leader-elect-resource-namespace=openshift-cluster-machine-approver
      - --api-group-version=machine.openshift.io/v1beta1
      - --max-concurrent-reconciles=10
      command:
      - /usr/bin/machine-approver
      env:
      - name: RELEASE_VERSION
        value: 4.18.28
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9f4724570795357eb097251a021f20c94c79b3054f3adb3bc0812143ba791dc1
      imagePullPolicy: IfNotPresent
      name: machine-approver-controller
      resources:
        requests:
          cpu: 10m
          memory: 50Mi
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
      - mountPath: /var/run/configmaps/config
        name: config
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-9zdwv
        readOnly: true
    dnsPolicy: ClusterFirst
    enableServiceLinks: true
    hostNetwork: true
    imagePullSecrets:
    - name: machine-approver-sa-dockercfg-dnxpd
    nodeName: master-0
    nodeSelector:
      node-role.kubernetes.io/master: ""
    preemptionPolicy: PreemptLowerPriority
    priority: 2000000000
    priorityClassName: system-cluster-critical
    restartPolicy: Always
    schedulerName: default-scheduler
    securityContext: {}
    serviceAccount: machine-approver-sa
    serviceAccountName: machine-approver-sa
    terminationGracePeriodSeconds: 30
    tolerations:
    - effect: NoSchedule
      key: node-role.kubernetes.io/master
      operator: Exists
    - effect: NoExecute
      key: node.kubernetes.io/unreachable
      operator: Exists
      tolerationSeconds: 120
    - effect: NoExecute
      key: node.kubernetes.io/not-ready
      operator: Exists
      tolerationSeconds: 120
    - effect: NoSchedule
      key: node.kubernetes.io/memory-pressure
      operator: Exists
    volumes:
    - configMap:
        defaultMode: 420
        name: kube-rbac-proxy
      name: auth-proxy-config
    - name: machine-approver-tls
      secret:
        defaultMode: 420
        secretName: machine-approver-tls
    - configMap:
        defaultMode: 440
        name: machine-approver-config
        optional: true
      name: config
    - name: kube-api-access-9zdwv
      projected:
        defaultMode: 420
        sources:
        - serviceAccountToken:
            expirationSeconds: 3607
            path: token
        - configMap:
            items:
            - key: ca.crt
              path: ca.crt
            name: kube-root-ca.crt
        - downwardAPI:
            items:
            - fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace
              path: namespace
        - configMap:
            items:
            - key: service-ca.crt
              path: service-ca.crt
            name: openshift-service-ca.crt
  status:
    conditions:
    - lastProbeTime: null
      lastTransitionTime: "2025-12-04T00:35:05Z"
      status: "True"
      type: PodReadyToStartContainers
    - lastProbeTime: null
      lastTransitionTime: "2025-12-04T00:35:03Z"
      status: "True"
      type: Initialized
    - lastProbeTime: null
      lastTransitionTime: "2025-12-04T00:43:06Z"
      status: "True"
      type: Ready
    - lastProbeTime: null
      lastTransitionTime: "2025-12-04T00:43:06Z"
      status: "True"
      type: ContainersReady
    - lastProbeTime: null
      lastTransitionTime: "2025-12-04T00:35:03Z"
      status: "True"
      type: PodScheduled
    containerStatuses:
    - containerID: cri-o://81cad02b11b4d95e55ba4c828f4386fb00387ee0d3613ec94bd4bb4c1a9dc17e
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
      imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b03d2897e7cc0e8d0c306acb68ca3d9396d502882c14942faadfdb16bc40e17d
      lastState: {}
      name: kube-rbac-proxy
      ready: true
      restartCount: 0
      started: true
      state:
        running:
          startedAt: "2025-12-04T00:35:04Z"
      volumeMounts:
      - mountPath: /etc/kube-rbac-proxy
        name: auth-proxy-config
      - mountPath: /etc/tls/private
        name: machine-approver-tls
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-9zdwv
        readOnly: true
        recursiveReadOnly: Disabled
    - containerID: cri-o://8000d81e035069ef4db06656d61f649acca4b5fa31c4f823032c4223b4001cee
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9f4724570795357eb097251a021f20c94c79b3054f3adb3bc0812143ba791dc1
      imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9f4724570795357eb097251a021f20c94c79b3054f3adb3bc0812143ba791dc1
      lastState:
        terminated:
          containerID: cri-o://74dc0ef48d8fe1939f3624a82b33b96968f437acd029a208849d167b8818221f
          exitCode: 255
          finishedAt: "2025-12-04T00:43:04Z"
          message: |
            eSigningRequest" source="kind source: *v1.CertificateSigningRequest"
            I1204 00:35:05.130309 1 controller.go:175] "Starting EventSource" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" source="kind source: *v1.ConfigMap"
            I1204 00:35:05.130317 1 controller.go:183] "Starting Controller" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest"
            I1204 00:35:05.130372 1 status.go:97] Starting cluster operator status controller
            I1204 00:35:05.134154 1 reflector.go:368] Caches populated for *v1.ClusterOperator from github.com/openshift/cluster-machine-approver/status.go:99
            I1204 00:35:05.253361 1 reflector.go:368] Caches populated for *v1.ConfigMap from sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106
            I1204 00:35:05.336476 1 controller.go:217] "Starting workers" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" worker count=10
            E1204 00:41:17.635070 1 leaderelection.go:429] Failed to update lock optimistically: Timeout: request did not complete within requested timeout - context deadline exceeded, falling back to slow path
            E1204 00:42:17.637720 1 leaderelection.go:436] error retrieving resource lock openshift-cluster-machine-approver/cluster-machine-approver-leader: the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io cluster-machine-approver-leader)
            I1204 00:42:30.632010 1 leaderelection.go:297] failed to renew lease openshift-cluster-machine-approver/cluster-machine-approver-leader: timed out waiting for the condition
            E1204 00:43:04.638848 1 leaderelection.go:322] Failed to release lock: Timeout: request did not complete within requested timeout - context deadline exceeded
            F1204 00:43:04.638967 1 main.go:244] unable to run the manager: leader election lost
          reason: Error
          startedAt: "2025-12-04T00:35:04Z"
      name: machine-approver-controller
      ready: true
      restartCount: 1
      started: true
      state:
        running:
          startedAt: "2025-12-04T00:43:05Z"
      volumeMounts:
      - mountPath: /var/run/configmaps/config
        name: config
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-9zdwv
        readOnly: true
        recursiveReadOnly: Disabled
    hostIP: 192.168.32.10
    hostIPs:
    - ip: 192.168.32.10
    phase: Running
    podIP: 192.168.32.10
    podIPs:
    - ip: 192.168.32.10
    qosClass: Burstable
    startTime: "2025-12-04T00:35:03Z"
kind: PodList
metadata:
  resourceVersion: "47221"