# NOTE(review): this document had been flattened onto a few lines (all structural
# newlines/indentation lost), which is invalid YAML. Reconstructed below into
# kubectl-style block YAML; all keys, values, and ordering preserved from the
# flattened text. Captured log text in the termination message is runtime data
# and is reproduced verbatim (including the upstream "optimitically" typo).
---
apiVersion: v1
items:
- apiVersion: v1
  kind: Pod
  metadata:
    creationTimestamp: "2026-03-19T11:57:48Z"
    generateName: machine-approver-5c6485487f-
    labels:
      app: machine-approver
      pod-template-hash: 5c6485487f
    managedFields:
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            .: {}
            f:target.workload.openshift.io/management: {}
          f:generateName: {}
          f:labels:
            .: {}
            f:app: {}
            f:pod-template-hash: {}
          f:ownerReferences:
            .: {}
            k:{"uid":"4e5c4abf-d92a-4583-a534-1dc082d49a4b"}: {}
        f:spec:
          f:containers:
            k:{"name":"kube-rbac-proxy"}:
              .: {}
              f:args: {}
              f:image: {}
              f:imagePullPolicy: {}
              f:name: {}
              f:ports:
                .: {}
                k:{"containerPort":9192,"protocol":"TCP"}:
                  .: {}
                  f:containerPort: {}
                  f:hostPort: {}
                  f:name: {}
                  f:protocol: {}
              f:resources:
                .: {}
                f:requests:
                  .: {}
                  f:cpu: {}
                  f:memory: {}
              f:terminationMessagePath: {}
              f:terminationMessagePolicy: {}
              f:volumeMounts:
                .: {}
                k:{"mountPath":"/etc/kube-rbac-proxy"}:
                  .: {}
                  f:mountPath: {}
                  f:name: {}
                k:{"mountPath":"/etc/tls/private"}:
                  .: {}
                  f:mountPath: {}
                  f:name: {}
            k:{"name":"machine-approver-controller"}:
              .: {}
              f:args: {}
              f:command: {}
              f:env:
                .: {}
                k:{"name":"RELEASE_VERSION"}:
                  .: {}
                  f:name: {}
                  f:value: {}
              f:image: {}
              f:imagePullPolicy: {}
              f:name: {}
              f:resources:
                .: {}
                f:requests:
                  .: {}
                  f:cpu: {}
                  f:memory: {}
              f:terminationMessagePath: {}
              f:terminationMessagePolicy: {}
              f:volumeMounts:
                .: {}
                k:{"mountPath":"/var/run/configmaps/config"}:
                  .: {}
                  f:mountPath: {}
                  f:name: {}
          f:dnsPolicy: {}
          f:enableServiceLinks: {}
          f:hostNetwork: {}
          f:nodeSelector: {}
          f:priorityClassName: {}
          f:restartPolicy: {}
          f:schedulerName: {}
          f:securityContext: {}
          f:serviceAccount: {}
          f:serviceAccountName: {}
          f:terminationGracePeriodSeconds: {}
          f:tolerations: {}
          f:volumes:
            .: {}
            k:{"name":"auth-proxy-config"}:
              .: {}
              f:configMap:
                .: {}
                f:defaultMode: {}
                f:name: {}
              f:name: {}
            k:{"name":"config"}:
              .: {}
              f:configMap:
                .: {}
                f:defaultMode: {}
                f:name: {}
                f:optional: {}
              f:name: {}
            k:{"name":"machine-approver-tls"}:
              .: {}
              f:name: {}
              f:secret:
                .: {}
                f:defaultMode: {}
                f:secretName: {}
      manager: kube-controller-manager
      operation: Update
      time: "2026-03-19T11:57:48Z"
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:status:
          f:conditions:
            k:{"type":"ContainersReady"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Initialized"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"PodReadyToStartContainers"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Ready"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
          f:containerStatuses: {}
          f:hostIP: {}
          f:hostIPs: {}
          f:phase: {}
          f:podIP: {}
          f:podIPs:
            .: {}
            k:{"ip":"192.168.32.10"}:
              .: {}
              f:ip: {}
          f:startTime: {}
      manager: kubelet
      operation: Update
      subresource: status
      time: "2026-03-19T12:07:11Z"
    name: machine-approver-5c6485487f-b2w5m
    namespace: openshift-cluster-machine-approver
    ownerReferences:
    - apiVersion: apps/v1
      blockOwnerDeletion: true
      controller: true
      kind: ReplicaSet
      name: machine-approver-5c6485487f
      uid: 4e5c4abf-d92a-4583-a534-1dc082d49a4b
    resourceVersion: "13418"
    uid: 81ba5bc0-fec9-41e5-b3d9-df17d7f1bd2a
  spec:
    containers:
    - args:
      - --secure-listen-address=0.0.0.0:9192
      - --upstream=http://127.0.0.1:9191/
      - --tls-cert-file=/etc/tls/private/tls.crt
      - --tls-private-key-file=/etc/tls/private/tls.key
      - --config-file=/etc/kube-rbac-proxy/config-file.yaml
      - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
      - --logtostderr=true
      - --v=3
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d12d0dc7eb86bbedf6b2d7689a28fd51f0d928f720e4a6783744304297c661ed
      imagePullPolicy: IfNotPresent
      name: kube-rbac-proxy
      ports:
      - containerPort: 9192
        hostPort: 9192
        name: https
        protocol: TCP
      resources:
        requests:
          cpu: 10m
          memory: 20Mi
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
      - mountPath: /etc/kube-rbac-proxy
        name: auth-proxy-config
      - mountPath: /etc/tls/private
        name: machine-approver-tls
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-v5cc5
        readOnly: true
    - args:
      - --config=/var/run/configmaps/config/config.yaml
      - -v=2
      - --logtostderr
      - --leader-elect=true
      - --leader-elect-lease-duration=137s
      - --leader-elect-renew-deadline=107s
      - --leader-elect-retry-period=26s
      - --leader-elect-resource-namespace=openshift-cluster-machine-approver
      - --api-group-version=machine.openshift.io/v1beta1
      - --max-concurrent-reconciles=10
      command:
      - /usr/bin/machine-approver
      env:
      - name: RELEASE_VERSION
        value: 4.18.35
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdd28dfe7132e19af9f013f72cf120d970bc31b6b74693af262f8d2e82a096e1
      imagePullPolicy: IfNotPresent
      name: machine-approver-controller
      resources:
        requests:
          cpu: 10m
          memory: 50Mi
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
      - mountPath: /var/run/configmaps/config
        name: config
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-v5cc5
        readOnly: true
    dnsPolicy: ClusterFirst
    enableServiceLinks: true
    hostNetwork: true
    imagePullSecrets:
    - name: machine-approver-sa-dockercfg-h5fcf
    nodeName: master-0
    nodeSelector:
      node-role.kubernetes.io/master: ""
    preemptionPolicy: PreemptLowerPriority
    priority: 2000000000
    priorityClassName: system-cluster-critical
    restartPolicy: Always
    schedulerName: default-scheduler
    securityContext: {}
    serviceAccount: machine-approver-sa
    serviceAccountName: machine-approver-sa
    terminationGracePeriodSeconds: 30
    tolerations:
    - effect: NoSchedule
      key: node-role.kubernetes.io/master
      operator: Exists
    - effect: NoExecute
      key: node.kubernetes.io/unreachable
      operator: Exists
      tolerationSeconds: 120
    - effect: NoExecute
      key: node.kubernetes.io/not-ready
      operator: Exists
      tolerationSeconds: 120
    - effect: NoSchedule
      key: node.kubernetes.io/memory-pressure
      operator: Exists
    volumes:
    - configMap:
        defaultMode: 420
        name: kube-rbac-proxy
      name: auth-proxy-config
    - name: machine-approver-tls
      secret:
        defaultMode: 420
        secretName: machine-approver-tls
    - configMap:
        # NOTE(review): 440 (decimal) is an unusual mode next to the sibling
        # volumes' 420 (= 0644); preserved as observed — confirm against the
        # deployment that created this pod.
        defaultMode: 440
        name: machine-approver-config
        optional: true
      name: config
    - name: kube-api-access-v5cc5
      projected:
        defaultMode: 420
        sources:
        - serviceAccountToken:
            expirationSeconds: 3607
            path: token
        - configMap:
            items:
            - key: ca.crt
              path: ca.crt
            name: kube-root-ca.crt
        - downwardAPI:
            items:
            - fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace
              path: namespace
        - configMap:
            items:
            - key: service-ca.crt
              path: service-ca.crt
            name: openshift-service-ca.crt
  status:
    conditions:
    - lastProbeTime: null
      lastTransitionTime: "2026-03-19T11:57:50Z"
      status: "True"
      type: PodReadyToStartContainers
    - lastProbeTime: null
      lastTransitionTime: "2026-03-19T11:57:48Z"
      status: "True"
      type: Initialized
    - lastProbeTime: null
      lastTransitionTime: "2026-03-19T12:05:24Z"
      status: "True"
      type: Ready
    - lastProbeTime: null
      lastTransitionTime: "2026-03-19T12:05:24Z"
      status: "True"
      type: ContainersReady
    - lastProbeTime: null
      lastTransitionTime: "2026-03-19T11:57:48Z"
      status: "True"
      type: PodScheduled
    containerStatuses:
    - containerID: cri-o://ac74b38fd11c535ddb03b556aacc67450651b8de55a732a49658d786ebe0a734
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d12d0dc7eb86bbedf6b2d7689a28fd51f0d928f720e4a6783744304297c661ed
      imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d12d0dc7eb86bbedf6b2d7689a28fd51f0d928f720e4a6783744304297c661ed
      lastState: {}
      name: kube-rbac-proxy
      ready: true
      restartCount: 0
      started: true
      state:
        running:
          startedAt: "2026-03-19T11:57:49Z"
      volumeMounts:
      - mountPath: /etc/kube-rbac-proxy
        name: auth-proxy-config
      - mountPath: /etc/tls/private
        name: machine-approver-tls
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-v5cc5
        readOnly: true
        recursiveReadOnly: Disabled
    - containerID: cri-o://a23a6b5358beeaf5e645643ffb405a80fb4cf54ade8b5a4f38d294751cca2a76
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdd28dfe7132e19af9f013f72cf120d970bc31b6b74693af262f8d2e82a096e1
      imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdd28dfe7132e19af9f013f72cf120d970bc31b6b74693af262f8d2e82a096e1
      lastState:
        terminated:
          containerID: cri-o://7fc5e15dbb622b807625a944b8961634a70f1aecfe4e4c996f59e0fe79999af9
          exitCode: 255
          finishedAt: "2026-03-19T12:05:23Z"
          # NOTE(review): intra-line klog column padding was lost in the
          # flattening; log lines re-split on I/E/F0319 prefixes, content verbatim.
          message: |
            icatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest"
            I0319 11:57:49.941236 1 recorder.go:104] "master-0_bd97f499-6c53-4822-80b8-68c6cffc7e73 became leader" logger="events" type="Normal" object={"kind":"Lease","namespace":"openshift-cluster-machine-approver","name":"cluster-machine-approver-leader","uid":"5b9a19ea-d3b2-48f3-a8aa-d73233e9c28d","apiVersion":"coordination.k8s.io/v1","resourceVersion":"11659"} reason="LeaderElection"
            I0319 11:57:49.941269 1 status.go:97] Starting cluster operator status controller
            I0319 11:57:49.948591 1 reflector.go:368] Caches populated for *v1.ClusterOperator from github.com/openshift/cluster-machine-approver/status.go:99
            I0319 11:57:50.163309 1 reflector.go:368] Caches populated for *v1.ConfigMap from sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106
            I0319 11:57:50.247128 1 controller.go:217] "Starting workers" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" worker count=10
            E0319 12:03:36.195320 1 leaderelection.go:429] Failed to update lock optimitically: Timeout: request did not complete within requested timeout - context deadline exceeded, falling back to slow path
            E0319 12:04:36.198112 1 leaderelection.go:436] error retrieving resource lock openshift-cluster-machine-approver/cluster-machine-approver-leader: the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io cluster-machine-approver-leader)
            I0319 12:04:49.190914 1 leaderelection.go:297] failed to renew lease openshift-cluster-machine-approver/cluster-machine-approver-leader: timed out waiting for the condition
            E0319 12:05:23.208508 1 leaderelection.go:322] Failed to release lock: Timeout: request did not complete within requested timeout - context deadline exceeded
            F0319 12:05:23.208607 1 main.go:244] unable to run the manager: leader election lost
          reason: Error
          startedAt: "2026-03-19T11:57:49Z"
      name: machine-approver-controller
      ready: true
      restartCount: 1
      started: true
      state:
        running:
          startedAt: "2026-03-19T12:05:24Z"
      volumeMounts:
      - mountPath: /var/run/configmaps/config
        name: config
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: kube-api-access-v5cc5
        readOnly: true
        recursiveReadOnly: Disabled
    hostIP: 192.168.32.10
    hostIPs:
    - ip: 192.168.32.10
    phase: Running
    podIP: 192.168.32.10
    podIPs:
    - ip: 192.168.32.10
    qosClass: Burstable
    startTime: "2026-03-19T11:57:48Z"
kind: PodList
metadata:
  resourceVersion: "51395"