---
apiVersion: v1
kind: Pod
metadata:
  annotations:
    k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.128.0.8/23"],"mac_address":"0a:58:0a:80:00:08","gateway_ips":["10.128.0.1"],"routes":[{"dest":"10.128.0.0/16","nextHop":"10.128.0.1"},{"dest":"172.30.0.0/16","nextHop":"10.128.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.128.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.128.0.1"}],"ip_address":"10.128.0.8/23","gateway_ip":"10.128.0.1","role":"primary"}}'
    k8s.v1.cni.cncf.io/network-status: |-
      [{
          "name": "ovn-kubernetes",
          "interface": "eth0",
          "ips": [
              "10.128.0.8"
          ],
          "mac": "0a:58:0a:80:00:08",
          "default": true,
          "dns": {}
      }]
    openshift.io/required-scc: restricted-v2
    openshift.io/scc: restricted-v2
    seccomp.security.alpha.kubernetes.io/pod: runtime/default
  creationTimestamp: "2025-12-04T21:58:24Z"
  generateName: package-server-manager-67477646d4-
  labels:
    app: package-server-manager
    pod-template-hash: 67477646d4
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:openshift.io/required-scc: {}
          f:target.workload.openshift.io/management: {}
        f:generateName: {}
        f:labels:
          .: {}
          f:app: {}
          f:pod-template-hash: {}
        f:ownerReferences:
          .: {}
          k:{"uid":"e1b57413-9cbb-4b34-8010-9e5e4450ca3d"}: {}
      f:spec:
        f:containers:
          k:{"name":"kube-rbac-proxy"}:
            .: {}
            f:args: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:ports:
              .: {}
              k:{"containerPort":8443,"protocol":"TCP"}:
                .: {}
                f:containerPort: {}
                f:name: {}
                f:protocol: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:securityContext:
              .: {}
              f:allowPrivilegeEscalation: {}
              f:capabilities:
                .: {}
                f:drop: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/etc/tls/private"}:
                .: {}
                f:mountPath: {}
                f:name: {}
          k:{"name":"package-server-manager"}:
            .: {}
            f:args: {}
            f:command: {}
            f:env:
              .: {}
              k:{"name":"GOMEMLIMIT"}:
                .: {}
                f:name: {}
                f:value: {}
              k:{"name":"PACKAGESERVER_IMAGE"}:
                .: {}
                f:name: {}
                f:value: {}
              k:{"name":"PACKAGESERVER_NAME"}:
                .: {}
                f:name: {}
                f:value: {}
              k:{"name":"PACKAGESERVER_NAMESPACE"}:
                .: {}
                f:name: {}
                f:valueFrom:
                  .: {}
                  f:fieldRef: {}
              k:{"name":"RELEASE_VERSION"}:
                .: {}
                f:name: {}
                f:value: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:livenessProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:initialDelaySeconds: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:name: {}
            f:readinessProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:initialDelaySeconds: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:securityContext:
              .: {}
              f:allowPrivilegeEscalation: {}
              f:capabilities:
                .: {}
                f:drop: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
        f:dnsPolicy: {}
        f:enableServiceLinks: {}
        f:nodeSelector: {}
        f:priorityClassName: {}
        f:restartPolicy: {}
        f:schedulerName: {}
        f:securityContext:
          .: {}
          f:runAsNonRoot: {}
          f:seccompProfile:
            .: {}
            f:type: {}
        f:serviceAccount: {}
        f:serviceAccountName: {}
        f:terminationGracePeriodSeconds: {}
        f:tolerations: {}
        f:volumes:
          .: {}
          k:{"name":"package-server-manager-serving-cert"}:
            .: {}
            f:name: {}
            f:secret:
              .: {}
              f:defaultMode: {}
              f:secretName: {}
    manager: kube-controller-manager
    operation: Update
    time: "2025-12-04T21:58:24Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          .: {}
          k:{"type":"PodScheduled"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
    manager: kube-scheduler
    operation: Update
    subresource: status
    time: "2025-12-04T21:58:24Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:k8s.ovn.org/pod-networks: {}
    manager: master-0
    operation: Update
    subresource: status
    time: "2025-12-04T22:00:26Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:k8s.v1.cni.cncf.io/network-status: {}
    manager: multus-daemon
    operation: Update
    subresource: status
    time: "2025-12-04T22:01:09Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          k:{"type":"ContainersReady"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Initialized"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"PodReadyToStartContainers"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Ready"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
        f:containerStatuses: {}
        f:hostIP: {}
        f:hostIPs: {}
        f:phase: {}
        f:podIP: {}
        f:podIPs:
          .: {}
          k:{"ip":"10.128.0.8"}:
            .: {}
            f:ip: {}
        f:startTime: {}
    manager: kubelet
    operation: Update
    subresource: status
    time: "2025-12-04T22:19:43Z"
  name: package-server-manager-67477646d4-bslb5
  namespace: openshift-operator-lifecycle-manager
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: package-server-manager-67477646d4
    uid: e1b57413-9cbb-4b34-8010-9e5e4450ca3d
  resourceVersion: "13140"
  uid: 813f3ee7-35b5-4ee8-b453-00d16d910eae
spec:
  containers:
  - args:
    - --secure-listen-address=0.0.0.0:8443
    - --upstream=http://127.0.0.1:9090/
    - --tls-cert-file=/etc/tls/private/tls.crt
    - --tls-private-key-file=/etc/tls/private/tls.key
    - --logtostderr=true
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69ffd8f8dcceedc2d6eb306cea33f8beabc1be1308cd5f4ee8b9a8e3eab9843
    imagePullPolicy: IfNotPresent
    name: kube-rbac-proxy
    ports:
    - containerPort: 8443
      name: metrics
      protocol: TCP
    resources:
      requests:
        cpu: 10m
        memory: 20Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      runAsUser: 1000400000
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/tls/private
      name: package-server-manager-serving-cert
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8w592
      readOnly: true
  - args:
    - --name
    - $(PACKAGESERVER_NAME)
    - --namespace
    - $(PACKAGESERVER_NAMESPACE)
    - --metrics=:9090
    command:
    - /bin/psm
    - start
    env:
    - name: PACKAGESERVER_NAME
      value: packageserver
    - name: PACKAGESERVER_IMAGE
      value: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f1ca78c423f43f89a0411e40393642f64e4f8df9e5f61c25e31047c4cce170f9
    - name: PACKAGESERVER_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: RELEASE_VERSION
      value: 4.18.29
    - name: GOMEMLIMIT
      value: 5MiB
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f1ca78c423f43f89a0411e40393642f64e4f8df9e5f61c25e31047c4cce170f9
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: 8080
        scheme: HTTP
      initialDelaySeconds: 30
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    name: package-server-manager
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: 8080
        scheme: HTTP
      initialDelaySeconds: 30
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    resources:
      requests:
        cpu: 10m
        memory: 10Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      runAsUser: 1000400000
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8w592
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: master-0
  nodeSelector:
    kubernetes.io/os: linux
    node-role.kubernetes.io/master: ""
  preemptionPolicy: PreemptLowerPriority
  priority: 2000000000
  priorityClassName: system-cluster-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    fsGroup: 1000400000
    runAsNonRoot: true
    seLinuxOptions:
      level: s0:c20,c10
    seccompProfile:
      type: RuntimeDefault
  serviceAccount: olm-operator-serviceaccount
  serviceAccountName: olm-operator-serviceaccount
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 120
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 120
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  volumes:
  - name: package-server-manager-serving-cert
    secret:
      defaultMode: 420
      secretName: package-server-manager-serving-cert
  - name: kube-api-access-8w592
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
      - configMap:
          items:
          - key: service-ca.crt
            path: service-ca.crt
          name: openshift-service-ca.crt
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2025-12-04T22:01:21Z"
    status: "True"
    type: PodReadyToStartContainers
  - lastProbeTime: null
    lastTransitionTime: "2025-12-04T22:00:26Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2025-12-04T22:19:42Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2025-12-04T22:19:42Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2025-12-04T22:00:26Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: cri-o://0744bc69885cb8b27025aac2761602ed1dd53a9e628a283b5e3ef1e171da58fa
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69ffd8f8dcceedc2d6eb306cea33f8beabc1be1308cd5f4ee8b9a8e3eab9843
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69ffd8f8dcceedc2d6eb306cea33f8beabc1be1308cd5f4ee8b9a8e3eab9843
    lastState: {}
    name: kube-rbac-proxy
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-12-04T22:01:09Z"
    volumeMounts:
    - mountPath: /etc/tls/private
      name: package-server-manager-serving-cert
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8w592
      readOnly: true
      recursiveReadOnly: Disabled
  - containerID: cri-o://34e81e1de548a0f7a7581ef26a98220c98e277cec852a516b3aef35984f983d0
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f1ca78c423f43f89a0411e40393642f64e4f8df9e5f61c25e31047c4cce170f9
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f1ca78c423f43f89a0411e40393642f64e4f8df9e5f61c25e31047c4cce170f9
    lastState:
      terminated:
        containerID: cri-o://f11072c38e40de60dafeffc2c5ef9e1780820ca0ce672700aba155fa414fe72c
        exitCode: 1
        finishedAt: "2025-12-04T22:15:36Z"
        message: "ock\": context deadline exceeded\nI1204 22:15:02.022323 1 leaderelection.go:297] failed to renew lease openshift-operator-lifecycle-manager/packageserver-controller-lock: timed out waiting for the condition\nE1204 22:15:36.030327 1 leaderelection.go:322] Failed to release lock: Timeout: request did not complete within requested timeout - context deadline exceeded\n2025-12-04T22:15:36Z\tINFO\tStopping and waiting for non leader election runnables\n2025-12-04T22:15:36Z\tINFO\tStopping and waiting for leader election runnables\n2025-12-04T22:15:36Z\tINFO\tStopping and waiting for caches\n2025-12-04T22:15:36Z\tINFO\tStopping and waiting for webhooks\n2025-12-04T22:15:36Z\tINFO\tStopping and waiting for HTTP servers\n2025-12-04T22:15:36Z\tINFO\tWait completed, proceeding to shutdown the manager\n2025-12-04T22:15:36Z\tDEBUG\tevents\tpackage-server-manager-67477646d4-bslb5_132e2923-045c-417a-aeae-1d429fc40a12 stopped leading\t{\"type\": \"Normal\", \"object\": {\"kind\":\"Lease\",\"namespace\":\"openshift-operator-lifecycle-manager\",\"name\":\"packageserver-controller-lock\",\"uid\":\"7e409965-44b6-4295-bc9b-e41499cb1e3b\",\"apiVersion\":\"coordination.k8s.io/v1\",\"resourceVersion\":\"11421\"}, \"reason\": \"LeaderElection\"}\n2025-12-04T22:15:36Z\tINFO\tShutdown signal received, waiting for all workers to finish\t{\"controller\": \"clusterserviceversion\", \"controllerGroup\": \"operators.coreos.com\", \"controllerKind\": \"ClusterServiceVersion\"}\n2025-12-04T22:15:36Z\tERROR\tsetup\tproblem running manager\t{\"error\": \"leader election lost\"}\nmain.run\n\t/build/cmd/package-server-manager/main.go:150\ngithub.com/spf13/cobra.(*Command).execute\n\t/build/vendor/github.com/spf13/cobra/command.go:985\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/build/vendor/github.com/spf13/cobra/command.go:1117\ngithub.com/spf13/cobra.(*Command).Execute\n\t/build/vendor/github.com/spf13/cobra/command.go:1041\nmain.main\n\t/build/cmd/package-server-manager/main.go:43\nruntime.main\n\t/usr/lib/golang/src/runtime/proc.go:271\nError: leader election lost\nencountered an error while executing the binary: leader election lost\n"
        reason: Error
        startedAt: "2025-12-04T22:01:21Z"
    name: package-server-manager
    ready: true
    restartCount: 1
    started: true
    state:
      running:
        startedAt: "2025-12-04T22:15:37Z"
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8w592
      readOnly: true
      recursiveReadOnly: Disabled
  hostIP: 192.168.32.10
  hostIPs:
  - ip: 192.168.32.10
  phase: Running
  podIP: 10.128.0.8
  podIPs:
  - ip: 10.128.0.8
  qosClass: Burstable
  startTime: "2025-12-04T22:00:26Z"