# ReplicaSetList dump (kubectl get rs -o yaml) of the OVN-Kubernetes
# control-plane ReplicaSet in namespace openshift-ovn-kubernetes,
# owned by Deployment ovnkube-control-plane (OpenShift 4.18.25).
# NOTE(review): the source was whitespace-flattened; the block structure
# below is reconstructed from the Kubernetes ReplicaSet/FieldsV1 schema.
# Fix applied: the kube-rbac-proxy wait script compared against the literal
# string "WARN_TS" (it only worked via bash's arithmetic variable
# indirection inside [[ ... -gt ... ]]); it now uses "${WARN_TS}".
---
apiVersion: apps/v1
items:
  - apiVersion: apps/v1
    kind: ReplicaSet
    metadata:
      annotations:
        deployment.kubernetes.io/desired-replicas: "2"
        deployment.kubernetes.io/max-replicas: "2"
        deployment.kubernetes.io/revision: "1"
        kubernetes.io/description: |
          This deployment launches the ovn-kubernetes controller (control-plane) networking components.
        networkoperator.openshift.io/cluster-network-cidr: 10.128.0.0/14
        networkoperator.openshift.io/hybrid-overlay-status: disabled
        networkoperator.openshift.io/ip-family-mode: single-stack
        # Quoted: annotation values are strings; avoid any number-like parse.
        release.openshift.io/version: "4.18.25"
      creationTimestamp: "2025-10-11T10:27:25Z"
      generation: 1
      labels:
        app: ovnkube-control-plane
        component: network
        kubernetes.io/os: linux
        openshift.io/component: network
        pod-template-hash: 864d695c77
        type: infra
      # Server-side-apply bookkeeping written by kube-controller-manager;
      # nesting reconstructed per the FieldsV1 format (f:<field>, k:<listKey>).
      managedFields:
        - apiVersion: apps/v1
          fieldsType: FieldsV1
          fieldsV1:
            f:metadata:
              f:annotations:
                .: {}
                f:deployment.kubernetes.io/desired-replicas: {}
                f:deployment.kubernetes.io/max-replicas: {}
                f:deployment.kubernetes.io/revision: {}
                f:kubernetes.io/description: {}
                f:networkoperator.openshift.io/cluster-network-cidr: {}
                f:networkoperator.openshift.io/hybrid-overlay-status: {}
                f:networkoperator.openshift.io/ip-family-mode: {}
                f:release.openshift.io/version: {}
              f:labels:
                .: {}
                f:app: {}
                f:component: {}
                f:kubernetes.io/os: {}
                f:openshift.io/component: {}
                f:pod-template-hash: {}
                f:type: {}
              f:ownerReferences:
                .: {}
                k:{"uid":"35a6ae86-f5de-4e0a-8dad-0b70bc85f1db"}: {}
            f:spec:
              f:replicas: {}
              f:selector: {}
              f:template:
                f:metadata:
                  f:annotations:
                    .: {}
                    f:networkoperator.openshift.io/cluster-network-cidr: {}
                    f:networkoperator.openshift.io/hybrid-overlay-status: {}
                    f:networkoperator.openshift.io/ip-family-mode: {}
                    f:target.workload.openshift.io/management: {}
                  f:labels:
                    .: {}
                    f:app: {}
                    f:component: {}
                    f:kubernetes.io/os: {}
                    f:openshift.io/component: {}
                    f:pod-template-hash: {}
                    f:type: {}
                f:spec:
                  f:containers:
                    k:{"name":"kube-rbac-proxy"}:
                      .: {}
                      f:command: {}
                      f:image: {}
                      f:imagePullPolicy: {}
                      f:name: {}
                      f:ports:
                        .: {}
                        k:{"containerPort":9108,"protocol":"TCP"}:
                          .: {}
                          f:containerPort: {}
                          f:name: {}
                          f:protocol: {}
                      f:resources:
                        .: {}
                        f:requests:
                          .: {}
                          f:cpu: {}
                          f:memory: {}
                      f:terminationMessagePath: {}
                      f:terminationMessagePolicy: {}
                      f:volumeMounts:
                        .: {}
                        k:{"mountPath":"/etc/pki/tls/metrics-cert"}:
                          .: {}
                          f:mountPath: {}
                          f:name: {}
                          f:readOnly: {}
                    k:{"name":"ovnkube-cluster-manager"}:
                      .: {}
                      f:command: {}
                      f:env:
                        .: {}
                        k:{"name":"K8S_NODE"}:
                          .: {}
                          f:name: {}
                          f:valueFrom:
                            .: {}
                            f:fieldRef: {}
                        k:{"name":"OVN_KUBE_LOG_LEVEL"}:
                          .: {}
                          f:name: {}
                          f:value: {}
                        k:{"name":"POD_NAME"}:
                          .: {}
                          f:name: {}
                          f:valueFrom:
                            .: {}
                            f:fieldRef: {}
                      f:image: {}
                      f:imagePullPolicy: {}
                      f:name: {}
                      f:ports:
                        .: {}
                        k:{"containerPort":29108,"protocol":"TCP"}:
                          .: {}
                          f:containerPort: {}
                          f:name: {}
                          f:protocol: {}
                      f:resources:
                        .: {}
                        f:requests:
                          .: {}
                          f:cpu: {}
                          f:memory: {}
                      f:terminationMessagePath: {}
                      f:terminationMessagePolicy: {}
                      f:volumeMounts:
                        .: {}
                        k:{"mountPath":"/env"}:
                          .: {}
                          f:mountPath: {}
                          f:name: {}
                        k:{"mountPath":"/run/ovnkube-config/"}:
                          .: {}
                          f:mountPath: {}
                          f:name: {}
                  f:dnsPolicy: {}
                  f:hostNetwork: {}
                  f:nodeSelector: {}
                  f:priorityClassName: {}
                  f:restartPolicy: {}
                  f:schedulerName: {}
                  f:securityContext: {}
                  f:serviceAccount: {}
                  f:serviceAccountName: {}
                  f:terminationGracePeriodSeconds: {}
                  f:tolerations: {}
                  f:volumes:
                    .: {}
                    k:{"name":"env-overrides"}:
                      .: {}
                      f:configMap:
                        .: {}
                        f:defaultMode: {}
                        f:name: {}
                        f:optional: {}
                      f:name: {}
                    k:{"name":"ovn-control-plane-metrics-cert"}:
                      .: {}
                      f:name: {}
                      f:secret:
                        .: {}
                        f:defaultMode: {}
                        f:optional: {}
                        f:secretName: {}
                    k:{"name":"ovnkube-config"}:
                      .: {}
                      f:configMap:
                        .: {}
                        f:defaultMode: {}
                        f:name: {}
                      f:name: {}
          manager: kube-controller-manager
          operation: Update
          time: "2025-10-11T10:27:25Z"
        - apiVersion: apps/v1
          fieldsType: FieldsV1
          fieldsV1:
            f:status:
              f:availableReplicas: {}
              f:fullyLabeledReplicas: {}
              f:observedGeneration: {}
              f:readyReplicas: {}
              f:replicas: {}
          manager: kube-controller-manager
          operation: Update
          subresource: status
          time: "2025-10-11T10:37:47Z"
      name: ovnkube-control-plane-864d695c77
      namespace: openshift-ovn-kubernetes
      ownerReferences:
        - apiVersion: apps/v1
          blockOwnerDeletion: true
          controller: true
          kind: Deployment
          name: ovnkube-control-plane
          uid: 35a6ae86-f5de-4e0a-8dad-0b70bc85f1db
      resourceVersion: "17965"
      uid: 68f88204-ab46-4829-8724-a6975f6c7dec
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: ovnkube-control-plane
          pod-template-hash: 864d695c77
      template:
        metadata:
          annotations:
            networkoperator.openshift.io/cluster-network-cidr: 10.128.0.0/14
            networkoperator.openshift.io/hybrid-overlay-status: disabled
            networkoperator.openshift.io/ip-family-mode: single-stack
            target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
          creationTimestamp: null
          labels:
            app: ovnkube-control-plane
            component: network
            kubernetes.io/os: linux
            openshift.io/component: network
            pod-template-hash: 864d695c77
            type: infra
        spec:
          containers:
            # Metrics front end: waits for the optional metrics-cert secret
            # mount, then proxies :9108 -> 127.0.0.1:29108 with TLS + RBAC.
            - command:
                - /bin/bash
                - -c
                - |
                  #!/bin/bash
                  set -euo pipefail
                  TLS_PK=/etc/pki/tls/metrics-cert/tls.key
                  TLS_CERT=/etc/pki/tls/metrics-cert/tls.crt
                  # As the secret mount is optional we must wait for the files to be present.
                  # The service is created in monitor.yaml and this is created in sdn.yaml.
                  TS=$(date +%s)
                  WARN_TS=$(( ${TS} + $(( 20 * 60)) ))
                  HAS_LOGGED_INFO=0
                  log_missing_certs(){
                    CUR_TS=$(date +%s)
                    if [[ "${CUR_TS}" -gt "${WARN_TS}" ]]; then
                      echo $(date -Iseconds) WARN: ovn-control-plane-metrics-cert not mounted after 20 minutes.
                    elif [[ "${HAS_LOGGED_INFO}" -eq 0 ]] ; then
                      echo $(date -Iseconds) INFO: ovn-control-plane-metrics-cert not mounted. Waiting 20 minutes.
                      HAS_LOGGED_INFO=1
                    fi
                  }
                  while [[ ! -f "${TLS_PK}" || ! -f "${TLS_CERT}" ]] ; do
                    log_missing_certs
                    sleep 5
                  done
                  echo $(date -Iseconds) INFO: ovn-control-plane-metrics-certs mounted, starting kube-rbac-proxy
                  exec /usr/bin/kube-rbac-proxy \
                    --logtostderr \
                    --secure-listen-address=:9108 \
                    --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 \
                    --upstream=http://127.0.0.1:29108/ \
                    --tls-private-key-file=${TLS_PK} \
                    --tls-cert-file=${TLS_CERT}
              image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f22b65e5c744a32d3955dd7c36d809e3114a8aa501b44c00330dfda886c21169
              imagePullPolicy: IfNotPresent
              name: kube-rbac-proxy
              ports:
                - containerPort: 9108
                  name: https
                  protocol: TCP
              resources:
                requests:
                  cpu: 10m
                  memory: 20Mi
              terminationMessagePath: /dev/termination-log
              terminationMessagePolicy: FallbackToLogsOnError
              volumeMounts:
                - mountPath: /etc/pki/tls/metrics-cert
                  name: ovn-control-plane-metrics-cert
                  readOnly: true
            # Cluster manager: template-rendered feature flags (the empty
            # "" != "" comparisons are the rendered-out optional subnets),
            # then exec ovnkube in cluster-manager mode.
            - command:
                - /bin/bash
                - -c
                - |
                  set -xe
                  if [[ -f "/env/_master" ]]; then
                    set -o allexport
                    source "/env/_master"
                    set +o allexport
                  fi
                  ovn_v4_join_subnet_opt=
                  if [[ "" != "" ]]; then
                    ovn_v4_join_subnet_opt="--gateway-v4-join-subnet "
                  fi
                  ovn_v6_join_subnet_opt=
                  if [[ "" != "" ]]; then
                    ovn_v6_join_subnet_opt="--gateway-v6-join-subnet "
                  fi
                  ovn_v4_transit_switch_subnet_opt=
                  if [[ "" != "" ]]; then
                    ovn_v4_transit_switch_subnet_opt="--cluster-manager-v4-transit-switch-subnet "
                  fi
                  ovn_v6_transit_switch_subnet_opt=
                  if [[ "" != "" ]]; then
                    ovn_v6_transit_switch_subnet_opt="--cluster-manager-v6-transit-switch-subnet "
                  fi
                  dns_name_resolver_enabled_flag=
                  if [[ "false" == "true" ]]; then
                    dns_name_resolver_enabled_flag="--enable-dns-name-resolver"
                  fi
                  persistent_ips_enabled_flag=
                  if [[ "true" == "true" ]]; then
                    persistent_ips_enabled_flag="--enable-persistent-ips"
                  fi
                  # This is needed so that converting clusters from GA to TP
                  # will rollout control plane pods as well
                  network_segmentation_enabled_flag=
                  multi_network_enabled_flag=
                  if [[ "true" == "true" ]]; then
                    multi_network_enabled_flag="--enable-multi-network"
                    network_segmentation_enabled_flag="--enable-network-segmentation"
                  fi
                  route_advertisements_enable_flag=
                  if [[ "false" == "true" ]]; then
                    route_advertisements_enable_flag="--enable-route-advertisements"
                  fi
                  echo "I$(date "+%m%d %H:%M:%S.%N") - ovnkube-control-plane - start ovnkube --init-cluster-manager ${K8S_NODE}"
                  exec /usr/bin/ovnkube \
                    --enable-interconnect \
                    --init-cluster-manager "${K8S_NODE}" \
                    --config-file=/run/ovnkube-config/ovnkube.conf \
                    --loglevel "${OVN_KUBE_LOG_LEVEL}" \
                    --metrics-bind-address "127.0.0.1:29108" \
                    --metrics-enable-pprof \
                    --metrics-enable-config-duration \
                    ${ovn_v4_join_subnet_opt} \
                    ${ovn_v6_join_subnet_opt} \
                    ${ovn_v4_transit_switch_subnet_opt} \
                    ${ovn_v6_transit_switch_subnet_opt} \
                    ${dns_name_resolver_enabled_flag} \
                    ${persistent_ips_enabled_flag} \
                    ${multi_network_enabled_flag} \
                    ${network_segmentation_enabled_flag} \
                    ${route_advertisements_enable_flag}
              env:
                - name: OVN_KUBE_LOG_LEVEL
                  value: "4"
                - name: K8S_NODE
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: spec.nodeName
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.name
              image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b05c14f2032f7ba3017e9bcb6b3be4e7eaed8223e30a721b46b24f9cdcbd6a95
              imagePullPolicy: IfNotPresent
              name: ovnkube-cluster-manager
              ports:
                - containerPort: 29108
                  name: metrics-port
                  protocol: TCP
              resources:
                requests:
                  cpu: 10m
                  memory: 300Mi
              terminationMessagePath: /dev/termination-log
              terminationMessagePolicy: FallbackToLogsOnError
              volumeMounts:
                - mountPath: /run/ovnkube-config/
                  name: ovnkube-config
                - mountPath: /env
                  name: env-overrides
          dnsPolicy: Default
          hostNetwork: true
          nodeSelector:
            kubernetes.io/os: linux
            node-role.kubernetes.io/master: ""
          priorityClassName: system-cluster-critical
          restartPolicy: Always
          schedulerName: default-scheduler
          securityContext: {}
          serviceAccount: ovn-kubernetes-control-plane
          serviceAccountName: ovn-kubernetes-control-plane
          terminationGracePeriodSeconds: 30
          tolerations:
            - key: node-role.kubernetes.io/master
              operator: Exists
            - key: node.kubernetes.io/not-ready
              operator: Exists
            - key: node.kubernetes.io/unreachable
              operator: Exists
            - key: node.kubernetes.io/network-unavailable
              operator: Exists
          volumes:
            - configMap:
                defaultMode: 420
                name: ovnkube-config
              name: ovnkube-config
            - configMap:
                defaultMode: 420
                name: env-overrides
                optional: true
              name: env-overrides
            - name: ovn-control-plane-metrics-cert
              secret:
                defaultMode: 420
                optional: true
                secretName: ovn-control-plane-metrics-cert
    status:
      availableReplicas: 2
      fullyLabeledReplicas: 2
      observedGeneration: 1
      readyReplicas: 2
      replicas: 2
kind: ReplicaSetList
metadata:
  resourceVersion: "64825"