---
# Pod: openshift-kube-scheduler-guard-master-0
# A "guard" pod that mirrors the readiness of the static kube-scheduler pod
# on master-0: its readinessProbe hits the scheduler's /healthz endpoint on
# the node IP, while the container itself just sleeps and handles SIGTERM.
# NOTE(review): this is a live-object dump (status, resourceVersion, uid,
# managedFields present) — values are preserved exactly; only the destroyed
# indentation/block structure has been reconstructed.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.130.0.20/23"],"mac_address":"0a:58:0a:82:00:14","gateway_ips":["10.130.0.1"],"routes":[{"dest":"10.128.0.0/14","nextHop":"10.130.0.1"},{"dest":"172.30.0.0/16","nextHop":"10.130.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.130.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.130.0.1"}],"ip_address":"10.130.0.20/23","gateway_ip":"10.130.0.1","role":"primary"}}'
    # Multus network status; block scalar (|-) so the JSON keeps its newlines
    # without a trailing one.
    k8s.v1.cni.cncf.io/network-status: |-
      [{
          "name": "ovn-kubernetes",
          "interface": "eth0",
          "ips": [
              "10.130.0.20"
          ],
          "mac": "0a:58:0a:82:00:14",
          "default": true,
          "dns": {}
      }]
    target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
  creationTimestamp: "2025-10-11T10:40:39Z"
  labels:
    app: guard
  managedFields:
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            .: {}
            f:k8s.ovn.org/pod-networks: {}
      manager: master-0
      operation: Update
      subresource: status
      time: "2025-10-11T10:40:39Z"
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            f:k8s.v1.cni.cncf.io/network-status: {}
      manager: multus-daemon
      operation: Update
      subresource: status
      time: "2025-10-11T10:40:39Z"
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:status:
          f:conditions:
            .: {}
            k:{"type":"ContainersReady"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Initialized"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"PodReadyToStartContainers"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"PodScheduled"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Ready"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
          f:containerStatuses: {}
          f:hostIP: {}
          f:hostIPs: {}
          f:phase: {}
          f:podIP: {}
          f:podIPs:
            .: {}
            k:{"ip":"10.130.0.20"}:
              .: {}
              f:ip: {}
          f:startTime: {}
      manager: kubelet
      operation: Update
      subresource: status
      time: "2025-10-11T10:40:40Z"
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            f:target.workload.openshift.io/management: {}
          f:labels:
            .: {}
            f:app: {}
        f:spec:
          f:containers:
            k:{"name":"guard"}:
              .: {}
              f:args: {}
              f:command: {}
              f:image: {}
              f:imagePullPolicy: {}
              f:name: {}
              f:readinessProbe:
                .: {}
                f:failureThreshold: {}
                f:httpGet:
                  .: {}
                  f:host: {}
                  f:path: {}
                  f:port: {}
                  f:scheme: {}
                f:periodSeconds: {}
                f:successThreshold: {}
                f:timeoutSeconds: {}
              f:resources:
                .: {}
                f:requests:
                  .: {}
                  f:cpu: {}
                  f:memory: {}
              f:terminationMessagePath: {}
              f:terminationMessagePolicy: {}
          f:dnsPolicy: {}
          f:enableServiceLinks: {}
          f:hostname: {}
          f:nodeName: {}
          f:priorityClassName: {}
          f:restartPolicy: {}
          f:schedulerName: {}
          f:securityContext: {}
          f:terminationGracePeriodSeconds: {}
          f:tolerations: {}
      manager: cluster-kube-scheduler-operator
      operation: Update
      time: "2025-10-11T10:40:46Z"
  name: openshift-kube-scheduler-guard-master-0
  namespace: openshift-kube-scheduler
  resourceVersion: "21271"
  uid: 95f1328a-5ab8-4276-9bfd-55b3dbb2a994
spec:
  containers:
    - args:
        - -c
        # Literal block scalar: the shell script's newlines are significant.
        - |
          # properly handle TERM and exit as soon as it is signaled
          set -euo pipefail
          trap 'jobs -p | xargs -r kill; exit 0' TERM
          sleep infinity & wait
      command:
        - /bin/bash
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f81582ec6e6cc159d578a2d70ce7c8a4db8eb0172334226c9123770d7d2a1642
      imagePullPolicy: IfNotPresent
      name: guard
      # Probes the node-local kube-scheduler health endpoint, not the pod.
      readinessProbe:
        failureThreshold: 3
        httpGet:
          host: 192.168.34.10
          path: healthz
          port: 10259
          scheme: HTTPS
        periodSeconds: 5
        successThreshold: 1
        timeoutSeconds: 5
      resources:
        requests:
          cpu: 10m
          memory: 5Mi
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: kube-api-access-tzddm
          readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostname: guard-9eab68cc2713b2521b472617ac39251c52e2a96d-end
  imagePullSecrets:
    - name: default-dockercfg-4hwjx
  nodeName: master-0
  preemptionPolicy: PreemptLowerPriority
  priority: 2000000000
  priorityClassName: system-cluster-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 3
  tolerations:
    - operator: Exists
  volumes:
    - name: kube-api-access-tzddm
      projected:
        defaultMode: 420
        sources:
          - serviceAccountToken:
              expirationSeconds: 3607
              path: token
          - configMap:
              items:
                - key: ca.crt
                  path: ca.crt
              name: kube-root-ca.crt
          - downwardAPI:
              items:
                - fieldRef:
                    apiVersion: v1
                    fieldPath: metadata.namespace
                  path: namespace
          - configMap:
              items:
                - key: service-ca.crt
                  path: service-ca.crt
              name: openshift-service-ca.crt
status:
  conditions:
    - lastProbeTime: null
      lastTransitionTime: "2025-10-11T10:40:40Z"
      # "True" is quoted: an unquoted True would be parsed as a boolean,
      # but the API type here is string.
      status: "True"
      type: PodReadyToStartContainers
    - lastProbeTime: null
      lastTransitionTime: "2025-10-11T10:40:39Z"
      status: "True"
      type: Initialized
    - lastProbeTime: null
      lastTransitionTime: "2025-10-11T10:40:40Z"
      status: "True"
      type: Ready
    - lastProbeTime: null
      lastTransitionTime: "2025-10-11T10:40:40Z"
      status: "True"
      type: ContainersReady
    - lastProbeTime: null
      lastTransitionTime: "2025-10-11T10:40:39Z"
      status: "True"
      type: PodScheduled
  containerStatuses:
    - containerID: cri-o://0ce8427bbe6d75f182a5b32d8cd2f22fe40fc718730d32921af86371a9379011
      image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f81582ec6e6cc159d578a2d70ce7c8a4db8eb0172334226c9123770d7d2a1642
      imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f81582ec6e6cc159d578a2d70ce7c8a4db8eb0172334226c9123770d7d2a1642
      lastState: {}
      name: guard
      ready: true
      restartCount: 0
      started: true
      state:
        running:
          startedAt: "2025-10-11T10:40:40Z"
      volumeMounts:
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: kube-api-access-tzddm
          readOnly: true
          recursiveReadOnly: Disabled
  hostIP: 192.168.34.10
  hostIPs:
    - ip: 192.168.34.10
  phase: Running
  podIP: 10.130.0.20
  podIPs:
    - ip: 10.130.0.20
  qosClass: Burstable
  startTime: "2025-10-11T10:40:39Z"