---
apiVersion: apps/v1
items:
- apiVersion: apps/v1
  kind: Deployment
  metadata:
    annotations:
      deployment.kubernetes.io/revision: "1"
      kubernetes.io/description: |
        This deployment launches the ovn-kubernetes controller (control-plane) networking components.
      networkoperator.openshift.io/cluster-network-cidr: 10.128.0.0/14
      networkoperator.openshift.io/hybrid-overlay-status: disabled
      networkoperator.openshift.io/ip-family-mode: single-stack
      release.openshift.io/version: 4.18.25
    creationTimestamp: "2025-10-11T10:27:25Z"
    generation: 1
    labels:
      networkoperator.openshift.io/generates-operator-status: stand-alone
    # managedFields is server-side-apply bookkeeping recorded by the API
    # server; recent kubectl/oc clients hide it from `get -o yaml` output
    # unless --show-managed-fields is passed.
    managedFields:
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            f:kubernetes.io/description: {}
            f:networkoperator.openshift.io/cluster-network-cidr: {}
            f:networkoperator.openshift.io/hybrid-overlay-status: {}
            f:networkoperator.openshift.io/ip-family-mode: {}
            f:release.openshift.io/version: {}
          f:labels:
            f:networkoperator.openshift.io/generates-operator-status: {}
          f:ownerReferences:
            k:{"uid":"216d30b3-cc7f-49b9-949f-43cde8dd9ab2"}: {}
        f:spec:
          f:replicas: {}
          f:selector: {}
          f:strategy:
            f:rollingUpdate:
              f:maxSurge: {}
              f:maxUnavailable: {}
            f:type: {}
          f:template:
            f:metadata:
              f:annotations:
                f:networkoperator.openshift.io/cluster-network-cidr: {}
                f:networkoperator.openshift.io/hybrid-overlay-status: {}
                f:networkoperator.openshift.io/ip-family-mode: {}
                f:target.workload.openshift.io/management: {}
              f:labels:
                f:app: {}
                f:component: {}
                f:kubernetes.io/os: {}
                f:openshift.io/component: {}
                f:type: {}
            f:spec:
              f:containers:
                k:{"name":"kube-rbac-proxy"}:
                  .: {}
                  f:command: {}
                  f:image: {}
                  f:name: {}
                  f:ports:
                    k:{"containerPort":9108,"protocol":"TCP"}:
                      .: {}
                      f:containerPort: {}
                      f:name: {}
                  f:resources:
                    f:requests:
                      f:cpu: {}
                      f:memory: {}
                  f:terminationMessagePolicy: {}
                  f:volumeMounts:
                    k:{"mountPath":"/etc/pki/tls/metrics-cert"}:
                      .: {}
                      f:mountPath: {}
                      f:name: {}
                      f:readOnly: {}
                k:{"name":"ovnkube-cluster-manager"}:
                  .: {}
                  f:command: {}
                  f:env:
                    k:{"name":"K8S_NODE"}:
                      .: {}
                      f:name: {}
                      f:valueFrom:
                        f:fieldRef: {}
                    k:{"name":"OVN_KUBE_LOG_LEVEL"}:
                      .: {}
                      f:name: {}
                      f:value: {}
                    k:{"name":"POD_NAME"}:
                      .: {}
                      f:name: {}
                      f:valueFrom:
                        f:fieldRef: {}
                  f:image: {}
                  f:name: {}
                  f:ports:
                    k:{"containerPort":29108,"protocol":"TCP"}:
                      .: {}
                      f:containerPort: {}
                      f:name: {}
                  f:resources:
                    f:requests:
                      f:cpu: {}
                      f:memory: {}
                  f:terminationMessagePolicy: {}
                  f:volumeMounts:
                    k:{"mountPath":"/env"}:
                      .: {}
                      f:mountPath: {}
                      f:name: {}
                    k:{"mountPath":"/run/ovnkube-config/"}:
                      .: {}
                      f:mountPath: {}
                      f:name: {}
              f:dnsPolicy: {}
              f:hostNetwork: {}
              f:nodeSelector: {}
              f:priorityClassName: {}
              f:serviceAccountName: {}
              f:tolerations: {}
              f:volumes:
                k:{"name":"env-overrides"}:
                  .: {}
                  f:configMap:
                    f:name: {}
                    f:optional: {}
                  f:name: {}
                k:{"name":"ovn-control-plane-metrics-cert"}:
                  .: {}
                  f:name: {}
                  f:secret:
                    f:optional: {}
                    f:secretName: {}
                k:{"name":"ovnkube-config"}:
                  .: {}
                  f:configMap:
                    f:name: {}
                  f:name: {}
      manager: cluster-network-operator/operconfig
      operation: Apply
      time: "2025-10-11T10:27:25Z"
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            f:deployment.kubernetes.io/revision: {}
        f:status:
          f:availableReplicas: {}
          f:conditions:
            .: {}
            k:{"type":"Available"}:
              .: {}
              f:lastTransitionTime: {}
              f:lastUpdateTime: {}
              f:message: {}
              f:reason: {}
              f:status: {}
              f:type: {}
            k:{"type":"Progressing"}:
              .: {}
              f:lastTransitionTime: {}
              f:lastUpdateTime: {}
              f:message: {}
              f:reason: {}
              f:status: {}
              f:type: {}
          f:observedGeneration: {}
          f:readyReplicas: {}
          f:replicas: {}
          f:updatedReplicas: {}
      manager: kube-controller-manager
      operation: Update
      subresource: status
      time: "2025-10-11T10:37:47Z"
    name: ovnkube-control-plane
    namespace: openshift-ovn-kubernetes
    ownerReferences:
    - apiVersion: operator.openshift.io/v1
      blockOwnerDeletion: true
      controller: true
      kind: Network
      name: cluster
      uid: 216d30b3-cc7f-49b9-949f-43cde8dd9ab2
    resourceVersion: "17967"
    uid: 35a6ae86-f5de-4e0a-8dad-0b70bc85f1db
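  # Rollout behavior implied by the strategy in the spec below: with
  # replicas: 2, maxSurge: 0 and maxUnavailable: 1, a rolling update replaces
  # one control-plane pod at a time and never runs a third pod concurrently;
  # presumably a conservative choice for hostNetwork pods that bind fixed
  # host ports (9108/29108).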
  spec:
    progressDeadlineSeconds: 600
    replicas: 2
    revisionHistoryLimit: 10
    selector:
      matchLabels:
        app: ovnkube-control-plane
    strategy:
      rollingUpdate:
        maxSurge: 0
        maxUnavailable: 1
      type: RollingUpdate
    template:
      metadata:
        annotations:
          networkoperator.openshift.io/cluster-network-cidr: 10.128.0.0/14
          networkoperator.openshift.io/hybrid-overlay-status: disabled
          networkoperator.openshift.io/ip-family-mode: single-stack
          target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
        creationTimestamp: null
        labels:
          app: ovnkube-control-plane
          component: network
          kubernetes.io/os: linux
          openshift.io/component: network
          type: infra
      spec:
        containers:
        - command:
          - /bin/bash
          - -c
          - |
            #!/bin/bash
            set -euo pipefail
            TLS_PK=/etc/pki/tls/metrics-cert/tls.key
            TLS_CERT=/etc/pki/tls/metrics-cert/tls.crt

            # As the secret mount is optional we must wait for the files to be present.
            # The service is created in monitor.yaml and this is created in sdn.yaml.
            TS=$(date +%s)
            WARN_TS=$(( ${TS} + $(( 20 * 60 )) ))
            HAS_LOGGED_INFO=0

            log_missing_certs() {
              CUR_TS=$(date +%s)
              if [[ "${CUR_TS}" -gt "${WARN_TS}" ]]; then
                echo "$(date -Iseconds) WARN: ovn-control-plane-metrics-cert not mounted after 20 minutes."
              elif [[ "${HAS_LOGGED_INFO}" -eq 0 ]]; then
                echo "$(date -Iseconds) INFO: ovn-control-plane-metrics-cert not mounted. Waiting 20 minutes."
                HAS_LOGGED_INFO=1
              fi
            }

            while [[ ! -f "${TLS_PK}" || ! -f "${TLS_CERT}" ]]; do
              log_missing_certs
              sleep 5
            done

            echo "$(date -Iseconds) INFO: ovn-control-plane-metrics-certs mounted, starting kube-rbac-proxy"
            exec /usr/bin/kube-rbac-proxy \
              --logtostderr \
              --secure-listen-address=:9108 \
              --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 \
              --upstream=http://127.0.0.1:29108/ \
              --tls-private-key-file=${TLS_PK} \
              --tls-cert-file=${TLS_CERT}
          image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f22b65e5c744a32d3955dd7c36d809e3114a8aa501b44c00330dfda886c21169
          imagePullPolicy: IfNotPresent
          name: kube-rbac-proxy
          ports:
          - containerPort: 9108
            name: https
            protocol: TCP
          resources:
            requests:
              cpu: 10m
              memory: 20Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: FallbackToLogsOnError
          volumeMounts:
          - mountPath: /etc/pki/tls/metrics-cert
            name: ovn-control-plane-metrics-cert
            readOnly: true
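        # Second container: the cluster manager itself. The kube-rbac-proxy
        # container above terminates TLS on :9108 and proxies requests to the
        # plaintext metrics endpoint this container binds on 127.0.0.1:29108.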
        - command:
          - /bin/bash
          - -c
          - |
            set -xe
            if [[ -f "/env/_master" ]]; then
              set -o allexport
              source "/env/_master"
              set +o allexport
            fi

            # The empty "" literals in the comparisons below are values the
            # cluster-network-operator (this object's field manager) rendered
            # into the script from its manifest template; on this cluster they
            # rendered empty, so the optional subnet flags are never appended.
            ovn_v4_join_subnet_opt=
            if [[ "" != "" ]]; then
              ovn_v4_join_subnet_opt="--gateway-v4-join-subnet "
            fi
            ovn_v6_join_subnet_opt=
            if [[ "" != "" ]]; then
              ovn_v6_join_subnet_opt="--gateway-v6-join-subnet "
            fi

            ovn_v4_transit_switch_subnet_opt=
            if [[ "" != "" ]]; then
              ovn_v4_transit_switch_subnet_opt="--cluster-manager-v4-transit-switch-subnet "
            fi
            ovn_v6_transit_switch_subnet_opt=
            if [[ "" != "" ]]; then
              ovn_v6_transit_switch_subnet_opt="--cluster-manager-v6-transit-switch-subnet "
            fi

            dns_name_resolver_enabled_flag=
            if [[ "false" == "true" ]]; then
              dns_name_resolver_enabled_flag="--enable-dns-name-resolver"
            fi

            persistent_ips_enabled_flag=
            if [[ "true" == "true" ]]; then
              persistent_ips_enabled_flag="--enable-persistent-ips"
            fi

            # This is needed so that converting clusters from GA to TP
            # will roll out control plane pods as well
            network_segmentation_enabled_flag=
            multi_network_enabled_flag=
            if [[ "true" == "true" ]]; then
              multi_network_enabled_flag="--enable-multi-network"
              network_segmentation_enabled_flag="--enable-network-segmentation"
            fi

            route_advertisements_enable_flag=
            if [[ "false" == "true" ]]; then
              route_advertisements_enable_flag="--enable-route-advertisements"
            fi

            echo "I$(date "+%m%d %H:%M:%S.%N") - ovnkube-control-plane - start ovnkube --init-cluster-manager ${K8S_NODE}"
            exec /usr/bin/ovnkube \
              --enable-interconnect \
              --init-cluster-manager "${K8S_NODE}" \
              --config-file=/run/ovnkube-config/ovnkube.conf \
              --loglevel "${OVN_KUBE_LOG_LEVEL}" \
              --metrics-bind-address "127.0.0.1:29108" \
              --metrics-enable-pprof \
              --metrics-enable-config-duration \
              ${ovn_v4_join_subnet_opt} \
              ${ovn_v6_join_subnet_opt} \
              ${ovn_v4_transit_switch_subnet_opt} \
              ${ovn_v6_transit_switch_subnet_opt} \
              ${dns_name_resolver_enabled_flag} \
              ${persistent_ips_enabled_flag} \
              ${multi_network_enabled_flag} \
              ${network_segmentation_enabled_flag} \
              ${route_advertisements_enable_flag}
          env:
          - name: OVN_KUBE_LOG_LEVEL
            value: "4"
          - name: K8S_NODE
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: spec.nodeName
          - name: POD_NAME
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: metadata.name
          image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b05c14f2032f7ba3017e9bcb6b3be4e7eaed8223e30a721b46b24f9cdcbd6a95
          imagePullPolicy: IfNotPresent
          name: ovnkube-cluster-manager
          ports:
          - containerPort: 29108
            name: metrics-port
            protocol: TCP
          resources:
            requests:
              cpu: 10m
              memory: 300Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: FallbackToLogsOnError
          volumeMounts:
          - mountPath: /run/ovnkube-config/
            name: ovnkube-config
          - mountPath: /env
            name: env-overrides
        dnsPolicy: Default
        hostNetwork: true
        nodeSelector:
          kubernetes.io/os: linux
          node-role.kubernetes.io/master: ""
        priorityClassName: system-cluster-critical
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        serviceAccount: ovn-kubernetes-control-plane
        serviceAccountName: ovn-kubernetes-control-plane
        terminationGracePeriodSeconds: 30
        tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: node.kubernetes.io/not-ready
          operator: Exists
        - key: node.kubernetes.io/unreachable
          operator: Exists
        - key: node.kubernetes.io/network-unavailable
          operator: Exists
        volumes:
        - configMap:
            defaultMode: 420
            name: ovnkube-config
          name: ovnkube-config
        - configMap:
            defaultMode: 420
            name: env-overrides
            optional: true
          name: env-overrides
        - name: ovn-control-plane-metrics-cert
          secret:
            defaultMode: 420
            optional: true
            secretName: ovn-control-plane-metrics-cert
  status:
    availableReplicas: 2
    conditions:
    - lastTransitionTime: "2025-10-11T10:27:42Z"
      lastUpdateTime: "2025-10-11T10:27:42Z"
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: "True"
      type: Available
    - lastTransitionTime: "2025-10-11T10:27:25Z"
      lastUpdateTime: "2025-10-11T10:27:46Z"
      message: ReplicaSet "ovnkube-control-plane-864d695c77" has successfully progressed.
      reason: NewReplicaSetAvailable
      status: "True"
      type: Progressing
    observedGeneration: 1
    readyReplicas: 2
    replicas: 2
    updatedReplicas: 2
kind: DeploymentList
metadata:
  resourceVersion: "64825"
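# How a dump in this shape can be reproduced (assumption: this file came from
# a cluster export; the typed DeploymentList wrapper matches the per-namespace
# lists an OpenShift must-gather collects). The Deployment alone can be
# fetched with:
#
#   oc get deployment ovnkube-control-plane -n openshift-ovn-kubernetes \
#     -o yaml --show-managed-fields
#
# (kubectl works the same; --show-managed-fields is required on recent
# clients to include the managedFields block shown above.)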