Name:             telemetry-operator-controller-manager-5fb668c8c9-r2wxn
Namespace:        openstack-operators
Priority:         0
Service Account:  telemetry-operator-controller-manager
Node:             crc/192.168.126.11
Start Time:       Wed, 28 Jan 2026 16:58:11 +0000
Labels:           control-plane=controller-manager
                  openstack.org/operator-name=telemetry
                  pod-template-hash=5fb668c8c9
Annotations:      k8s.ovn.org/pod-networks:
                    {"default":{"ip_addresses":["10.217.0.86/23"],"mac_address":"0a:58:0a:d9:00:56","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0...
                  k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "ovn-kubernetes",
                        "interface": "eth0",
                        "ips": [
                            "10.217.0.86"
                        ],
                        "mac": "0a:58:0a:d9:00:56",
                        "default": true,
                        "dns": {}
                    }]
                  kubectl.kubernetes.io/default-container: manager
                  openshift.io/scc: anyuid
Status:           Running
IP:               10.217.0.86
IPs:
  IP:           10.217.0.86
Controlled By:  ReplicaSet/telemetry-operator-controller-manager-5fb668c8c9
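
The pod is owned by ReplicaSet telemetry-operator-controller-manager-5fb668c8c9; by the usual hash-suffix naming convention, that ReplicaSet should in turn belong to a Deployment named telemetry-operator-controller-manager (an assumption inferred from the name, not shown in this output). A quick way to check the rollout as a whole:

    # Assumes the owning Deployment follows the standard <name>-<hash> ReplicaSet convention.
    kubectl rollout status -n openstack-operators deployment/telemetry-operator-controller-manager
    kubectl get replicaset -n openstack-operators telemetry-operator-controller-manager-5fb668c8c9
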
Containers:
  manager:
    Container ID:  cri-o://a18537c3e1e6e451b38bad6a123aeb792bb6adef885ef3fca3b51432430b6319
    Image:         quay.io/openstack-k8s-operators/telemetry-operator@sha256:4a380305a16571270007587ba70ff92459082d7078b6101eaf9f78cc53d67f2f
    Image ID:      quay.io/openstack-k8s-operators/telemetry-operator@sha256:4a380305a16571270007587ba70ff92459082d7078b6101eaf9f78cc53d67f2f
    Port:          <none>
    Host Port:     <none>
    Command:
      /manager
    Args:
      --health-probe-bind-address=:8081
      --metrics-bind-address=127.0.0.1:8080
      --leader-elect
    State:          Running
      Started:      Wed, 28 Jan 2026 17:15:57 +0000
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Wed, 28 Jan 2026 17:07:35 +0000
      Finished:     Wed, 28 Jan 2026 17:15:49 +0000
    Ready:          True
    Restart Count:  2
    Limits:
      cpu:     500m
      memory:  512Mi
    Requests:
      cpu:      10m
      memory:   256Mi
    Liveness:   http-get http://:8081/healthz delay=15s timeout=1s period=20s #success=1 #failure=3
    Readiness:  http-get http://:8081/readyz delay=5s timeout=1s period=10s #success=1 #failure=3
    Environment:
      LEASE_DURATION:   30
      RENEW_DEADLINE:   20
      RETRY_PERIOD:     5
      ENABLE_WEBHOOKS:  false
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-zks5s (ro)
  kube-rbac-proxy:
    Container ID:  cri-o://e50867894e5f977995900074f528ca383a7ca18e7c18243b87fd0c3de6895b8e
    Image:         quay.io/openstack-k8s-operators/kube-rbac-proxy@sha256:d28df2924a366ed857d6c2c14baac9741238032d41f3d02c12cd757189b68b8a
    Image ID:      quay.io/openstack-k8s-operators/kube-rbac-proxy@sha256:d28df2924a366ed857d6c2c14baac9741238032d41f3d02c12cd757189b68b8a
    Port:          8443/TCP
    Host Port:     0/TCP
    Args:
      --secure-listen-address=0.0.0.0:8443
      --upstream=http://127.0.0.1:8080/
      --logtostderr=true
      --v=0
    State:          Running
      Started:      Wed, 28 Jan 2026 16:59:38 +0000
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     500m
      memory:  128Mi
    Requests:
      cpu:        5m
      memory:     64Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-zks5s (ro)
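
The manager container's Last State shows a Terminated/Error exit (code 1) with Restart Count 2, so the logs of the previous instance are the first thing to pull. A minimal sketch, using the pod name and namespace from the output above:

    # Fetch logs from the crashed (previous) instance of the manager container.
    kubectl logs -n openstack-operators \
      telemetry-operator-controller-manager-5fb668c8c9-r2wxn \
      -c manager --previous
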
Conditions:
  Type                        Status
  PodReadyToStartContainers   True 
  Initialized                 True 
  Ready                       True 
  ContainersReady             True 
  PodScheduled                True 
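
All five conditions report True, so the pod is currently healthy despite the earlier restarts. If you are scripting around this state, kubectl can block until the Ready condition holds:

    # Wait (up to 60s) for the pod to report Ready.
    kubectl wait -n openstack-operators --for=condition=Ready \
      pod/telemetry-operator-controller-manager-5fb668c8c9-r2wxn --timeout=60s
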
Volumes:
  kube-api-access-zks5s:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
    ConfigMapName:           openshift-service-ca.crt
    ConfigMapOptional:       <nil>
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 120s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 120s
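
The Burstable QoS class follows from the resource stanzas above: both containers set requests lower than their limits (e.g. manager requests 10m CPU/256Mi against limits of 500m/512Mi). One way to confirm it from the API rather than from describe output:

    # Print the QoS class assigned to this pod.
    kubectl get pod -n openstack-operators \
      telemetry-operator-controller-manager-5fb668c8c9-r2wxn \
      -o jsonpath='{.status.qosClass}{"\n"}'
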
Events:
  Type     Reason                           Age                From               Message
  ----     ------                           ----               ----               -------
  Normal   Scheduled                        36m                default-scheduler  Successfully assigned openstack-operators/telemetry-operator-controller-manager-5fb668c8c9-r2wxn to crc
  Normal   AddedInterface                   36m                multus             Add eth0 [10.217.0.86/23] from ovn-kubernetes
  Warning  Failed                           35m                kubelet            Failed to pull image "quay.io/openstack-k8s-operators/telemetry-operator@sha256:4a380305a16571270007587ba70ff92459082d7078b6101eaf9f78cc53d67f2f": rpc error: code = Canceled desc = copying config: context canceled
  Warning  Failed                           35m                kubelet            Error: ErrImagePull
  Normal   Pulled                           35m                kubelet            Container image "quay.io/openstack-k8s-operators/kube-rbac-proxy@sha256:d28df2924a366ed857d6c2c14baac9741238032d41f3d02c12cd757189b68b8a" already present on machine
  Normal   Created                          35m                kubelet            Created container kube-rbac-proxy
  Normal   Started                          35m                kubelet            Started container kube-rbac-proxy
  Normal   Pulling                          34m (x2 over 36m)  kubelet            Pulling image "quay.io/openstack-k8s-operators/telemetry-operator@sha256:4a380305a16571270007587ba70ff92459082d7078b6101eaf9f78cc53d67f2f"
  Normal   Pulled                           34m                kubelet            Successfully pulled image "quay.io/openstack-k8s-operators/telemetry-operator@sha256:4a380305a16571270007587ba70ff92459082d7078b6101eaf9f78cc53d67f2f" in 5.734s (5.734s including waiting). Image size: 182277608 bytes.
  Normal   Created                          27m (x2 over 34m)  kubelet            Created container manager
  Normal   Started                          27m (x2 over 34m)  kubelet            Started container manager
  Warning  Unhealthy                        18m (x4 over 27m)  kubelet            Readiness probe failed: Get "http://10.217.0.86:8081/readyz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
  Warning  Unhealthy                        18m (x3 over 27m)  kubelet            Readiness probe failed: Get "http://10.217.0.86:8081/readyz": dial tcp 10.217.0.86:8081: connect: connection refused
  Warning  Unhealthy                        18m (x3 over 27m)  kubelet            Liveness probe failed: Get "http://10.217.0.86:8081/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
  Warning  Unhealthy                        18m (x2 over 27m)  kubelet            Liveness probe failed: Get "http://10.217.0.86:8081/healthz": dial tcp 10.217.0.86:8081: connect: connection refused
  Normal   Pulled                           18m (x2 over 27m)  kubelet            Container image "quay.io/openstack-k8s-operators/telemetry-operator@sha256:4a380305a16571270007587ba70ff92459082d7078b6101eaf9f78cc53d67f2f" already present on machine
  Warning  FailedToRetrieveImagePullSecret  18m                kubelet            Unable to retrieve some image pull secrets (telemetry-operator-controller-manager-dockercfg-zq6nx); attempting to pull the image may not succeed.
  Normal   Killing                          18m                kubelet            Container manager failed liveness probe, will be restarted
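
The event history tells the story: one canceled image pull, then a manager container that started, failed its /healthz and /readyz probes (both timeouts and connection-refused errors on 10.217.0.86:8081), and was killed and restarted by the kubelet. The probes can be reproduced by hand; this sketch assumes the pod is reachable via port-forward and that curl is available on the workstation (the -m 1 flag mirrors the probes' 1s timeout):

    # Forward the health-probe port locally, then hit both endpoints.
    kubectl port-forward -n openstack-operators \
      pod/telemetry-operator-controller-manager-5fb668c8c9-r2wxn 8081:8081 &
    curl -m 1 http://127.0.0.1:8081/healthz
    curl -m 1 http://127.0.0.1:8081/readyz

    # List only this pod's events, oldest first.
    kubectl get events -n openstack-operators \
      --field-selector involvedObject.name=telemetry-operator-controller-manager-5fb668c8c9-r2wxn \
      --sort-by=.lastTimestamp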
